linux/debian/patches/features/all/vserver/vs2.3.0.34.11.patch

Linux-VServer patch vs2.3.0.34.11 (full file: 28214 lines, 754 KiB; this is a partial
extract scraped from a web diff viewer — leading space markers on context lines may
have been stripped, so re-export from the original before applying with patch(1)).

--- a/arch/alpha/Kconfig 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/alpha/Kconfig 2008-04-19 15:14:51.000000000 -0400
@@ -671,6 +671,8 @@ config DUMMY_CONSOLE
depends on VGA_HOSE
default y
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/alpha/kernel/entry.S 2008-04-17 11:31:21.000000000 -0400
+++ a/arch/alpha/kernel/entry.S 2008-04-21 11:09:01.000000000 -0400
@@ -872,24 +872,15 @@ sys_getxgid:
.globl sys_getxpid
.ent sys_getxpid
sys_getxpid:
+ lda $sp, -16($sp)
+ stq $26, 0($sp)
.prologue 0
- ldq $2, TI_TASK($8)
- /* See linux/kernel/timer.c sys_getppid for discussion
- about this loop. */
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- ldl $0, TASK_TGID($2)
-1: ldl $1, TASK_TGID($4)
-#ifdef CONFIG_SMP
- mov $4, $5
- mb
- ldq $3, TASK_GROUP_LEADER($2)
- ldq $4, TASK_REAL_PARENT($3)
- cmpeq $4, $5, $5
- beq $5, 1b
-#endif
- stq $1, 80($sp)
+ lda $16, 96($sp)
+ jsr $26, do_getxpid
+ ldq $26, 0($sp)
+
+ lda $sp, 16($sp)
ret
.end sys_getxpid
--- a/arch/alpha/kernel/osf_sys.c 2008-05-21 14:30:05.000000000 -0400
+++ a/arch/alpha/kernel/osf_sys.c 2008-05-21 14:30:40.000000000 -0400
@@ -883,7 +883,7 @@ osf_gettimeofday(struct timeval32 __user
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/alpha/kernel/ptrace.c 2008-04-17 11:31:21.000000000 -0400
+++ a/arch/alpha/kernel/ptrace.c 2008-04-19 15:14:51.000000000 -0400
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/signal.h>
+#include <linux/vs_base.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
--- a/arch/alpha/kernel/semaphore.c 2008-04-17 11:31:21.000000000 -0400
+++ a/arch/alpha/kernel/semaphore.c 2008-04-19 15:14:51.000000000 -0400
@@ -68,8 +68,8 @@ __down_failed(struct semaphore *sem)
DECLARE_WAITQUEUE(wait, tsk);
#ifdef CONFIG_DEBUG_SEMAPHORE
- printk("%s(%d): down failed(%p)\n",
- tsk->comm, task_pid_nr(tsk), sem);
+ printk("%s(%d:#%u): down failed(%p)\n",
+ tsk->comm, task_pid_nr(tsk), tsk->xid, sem);
#endif
tsk->state = TASK_UNINTERRUPTIBLE;
@@ -97,8 +97,8 @@ __down_failed(struct semaphore *sem)
wake_up(&sem->wait);
#ifdef CONFIG_DEBUG_SEMAPHORE
- printk("%s(%d): down acquired(%p)\n",
- tsk->comm, task_pid_nr(tsk), sem);
+ printk("%s(%d:#%u): down acquired(%p)\n",
+ tsk->comm, task_pid_nr(tsk), tsk->xid, sem);
#endif
}
@@ -110,8 +110,8 @@ __down_failed_interruptible(struct semap
long ret = 0;
#ifdef CONFIG_DEBUG_SEMAPHORE
- printk("%s(%d): down failed(%p)\n",
- tsk->comm, task_pid_nr(tsk), sem);
+ printk("%s(%d:#%u): down failed(%p)\n",
+ tsk->comm, task_pid_nr(tsk), tsk->xid, sem);
#endif
tsk->state = TASK_INTERRUPTIBLE;
--- a/arch/alpha/kernel/systbls.S 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/alpha/kernel/systbls.S 2008-04-19 15:14:51.000000000 -0400
@@ -446,7 +446,7 @@ sys_call_table:
.quad sys_stat64 /* 425 */
.quad sys_lstat64
.quad sys_fstat64
- .quad sys_ni_syscall /* sys_vserver */
+ .quad sys_vserver /* sys_vserver */
.quad sys_ni_syscall /* sys_mbind */
.quad sys_ni_syscall /* sys_get_mempolicy */
.quad sys_ni_syscall /* sys_set_mempolicy */
--- a/arch/alpha/kernel/traps.c 2008-04-17 11:31:21.000000000 -0400
+++ a/arch/alpha/kernel/traps.c 2008-04-19 15:14:51.000000000 -0400
@@ -182,7 +182,8 @@ die_if_kernel(char * str, struct pt_regs
#ifdef CONFIG_SMP
printk("CPU %d ", hard_smp_processor_id());
#endif
- printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
+ printk("%s(%d[#%u]): %s %ld\n", current->comm,
+ task_pid_nr(current), current->xid, str, err);
dik_show_regs(regs, r9_15);
add_taint(TAINT_DIE);
dik_show_trace((unsigned long *)(regs+1));
--- a/arch/alpha/mm/fault.c 2008-04-17 11:31:21.000000000 -0400
+++ a/arch/alpha/mm/fault.c 2008-04-19 15:14:51.000000000 -0400
@@ -193,8 +193,8 @@ do_page_fault(unsigned long address, uns
down_read(&mm->mmap_sem);
goto survive;
}
- printk(KERN_ALERT "VM: killing process %s(%d)\n",
- current->comm, task_pid_nr(current));
+ printk(KERN_ALERT "VM: killing process %s(%d:#%u)\n",
+ current->comm, task_pid_nr(current), current->xid);
if (!user_mode(regs))
goto no_context;
do_group_exit(SIGKILL);
--- a/arch/arm/Kconfig 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/arm/Kconfig 2008-04-19 15:14:51.000000000 -0400
@@ -1180,6 +1180,8 @@ source "fs/Kconfig"
source "arch/arm/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/arm/kernel/calls.S 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/arm/kernel/calls.S 2008-04-19 15:14:51.000000000 -0400
@@ -322,7 +322,7 @@
/* 310 */ CALL(sys_request_key)
CALL(sys_keyctl)
CALL(ABI(sys_semtimedop, sys_oabi_semtimedop))
-/* vserver */ CALL(sys_ni_syscall)
+ CALL(sys_vserver)
CALL(sys_ioprio_set)
/* 315 */ CALL(sys_ioprio_get)
CALL(sys_inotify_init)
--- a/arch/arm/kernel/process.c 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/arm/kernel/process.c 2008-04-21 11:09:01.000000000 -0400
@@ -264,7 +264,8 @@ void __show_regs(struct pt_regs *regs)
void show_regs(struct pt_regs * regs)
{
printk("\n");
- printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
+ printk("Pid: %d[#%u], comm: %20s\n",
+ task_pid_nr(current), current->xid, current->comm);
__show_regs(regs);
__backtrace();
}
--- a/arch/arm/kernel/traps.c 2008-04-17 12:05:26.000000000 -0400
+++ a/arch/arm/kernel/traps.c 2008-04-19 15:14:51.000000000 -0400
@@ -214,8 +214,8 @@ static void __die(const char *str, int e
str, err, ++die_counter);
print_modules();
__show_regs(regs);
- printk("Process %s (pid: %d, stack limit = 0x%p)\n",
- tsk->comm, task_pid_nr(tsk), thread + 1);
+ printk("Process %s (pid: %d:#%u, stack limit = 0x%p)\n",
+ tsk->comm, task_pid_nr(tsk), tsk->xid, thread + 1);
if (!user_mode(regs) || in_interrupt()) {
dump_mem("Stack: ", regs->ARM_sp,
--- a/arch/arm/mm/fault.c 2008-04-17 12:05:27.000000000 -0400
+++ a/arch/arm/mm/fault.c 2008-04-19 15:14:51.000000000 -0400
@@ -292,7 +292,8 @@ do_page_fault(unsigned long addr, unsign
* happened to us that made us unable to handle
* the page fault gracefully.
*/
- printk("VM: killing process %s\n", tsk->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ tsk->comm, task_pid_nr(tsk), tsk->xid);
do_group_exit(SIGKILL);
return 0;
}
--- a/arch/cris/Kconfig 2008-04-17 12:05:27.000000000 -0400
+++ a/arch/cris/Kconfig 2008-04-19 15:14:51.000000000 -0400
@@ -679,6 +679,8 @@ source "drivers/usb/Kconfig"
source "arch/cris/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/frv/kernel/kernel_thread.S 2007-02-04 13:44:54.000000000 -0500
+++ a/arch/frv/kernel/kernel_thread.S 2008-04-21 11:09:01.000000000 -0400
@@ -37,7 +37,7 @@ kernel_thread:
# start by forking the current process, but with shared VM
setlos.p #__NR_clone,gr7 ; syscall number
- ori gr10,#CLONE_VM,gr8 ; first syscall arg [clone_flags]
+ ori gr10,#CLONE_KT,gr8 ; first syscall arg [clone_flags]
sethi.p #0xe4e4,gr9 ; second syscall arg [newsp]
setlo #0xe4e4,gr9
setlos.p #0,gr10 ; third syscall arg [parent_tidptr]
--- a/arch/h8300/Kconfig 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/h8300/Kconfig 2008-04-19 15:14:51.000000000 -0400
@@ -233,6 +233,8 @@ source "fs/Kconfig"
source "arch/h8300/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/ia64/ia32/ia32_entry.S 2008-04-17 10:37:14.000000000 -0400
+++ a/arch/ia64/ia32/ia32_entry.S 2008-04-19 15:14:51.000000000 -0400
@@ -446,7 +446,7 @@ ia32_syscall_table:
data8 sys_tgkill /* 270 */
data8 compat_sys_utimes
data8 sys32_fadvise64_64
- data8 sys_ni_syscall
+ data8 sys32_vserver
data8 sys_ni_syscall
data8 sys_ni_syscall /* 275 */
data8 sys_ni_syscall
--- a/arch/ia64/ia32/sys_ia32.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/ia32/sys_ia32.c 2008-04-19 15:14:51.000000000 -0400
@@ -1177,7 +1177,7 @@ sys32_gettimeofday (struct compat_timeva
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/ia64/Kconfig 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/Kconfig 2008-04-19 15:14:51.000000000 -0400
@@ -615,6 +615,8 @@ source "arch/ia64/hp/sim/Kconfig"
source "arch/ia64/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/ia64/kernel/entry.S 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/kernel/entry.S 2008-04-19 15:14:51.000000000 -0400
@@ -1547,7 +1547,7 @@ sys_call_table:
data8 sys_mq_notify
data8 sys_mq_getsetattr
data8 sys_kexec_load
- data8 sys_ni_syscall // reserved for vserver
+ data8 sys_vserver
data8 sys_waitid // 1270
data8 sys_add_key
data8 sys_request_key
--- a/arch/ia64/kernel/perfmon.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/kernel/perfmon.c 2008-04-19 15:14:51.000000000 -0400
@@ -40,6 +40,7 @@
#include <linux/capability.h>
#include <linux/rcupdate.h>
#include <linux/completion.h>
+#include <linux/vs_memory.h>
#include <asm/errno.h>
#include <asm/intrinsics.h>
@@ -2374,7 +2375,7 @@ pfm_smpl_buffer_alloc(struct task_struct
*/
insert_vm_struct(mm, vma);
- mm->total_vm += size >> PAGE_SHIFT;
+ vx_vmpages_add(mm, size >> PAGE_SHIFT);
vm_stat_account(vma->vm_mm, vma->vm_flags, vma->vm_file,
vma_pages(vma));
up_write(&task->mm->mmap_sem);
--- a/arch/ia64/kernel/process.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/kernel/process.c 2008-04-21 11:09:01.000000000 -0400
@@ -105,8 +105,8 @@ show_regs (struct pt_regs *regs)
unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
print_modules();
- printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
- smp_processor_id(), current->comm);
+ printk("\nPid: %d[#%u], CPU %d, comm: %20s\n", task_pid_nr(current),
+ current->xid, smp_processor_id(), current->comm);
printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s (%s)\n",
regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),
init_utsname()->release);
--- a/arch/ia64/kernel/ptrace.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/kernel/ptrace.c 2008-04-19 15:14:51.000000000 -0400
@@ -17,6 +17,7 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
+#include <linux/vs_base.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
--- a/arch/ia64/kernel/traps.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/kernel/traps.c 2008-04-21 10:33:04.000000000 -0400
@@ -60,8 +60,9 @@ die (const char *str, struct pt_regs *re
put_cpu();
if (++die.lock_owner_depth < 3) {
- printk("%s[%d]: %s %ld [%d]\n",
- current->comm, task_pid_nr(current), str, err, ++die_counter);
+ printk("%s[%d[#%u]]: %s %ld [%d]\n",
+ current->comm, task_pid_nr(current), current->xid,
+ str, err, ++die_counter);
if (notify_die(DIE_OOPS, str, regs, err, 255, SIGSEGV)
!= NOTIFY_STOP)
show_regs(regs);
@@ -324,8 +325,9 @@ handle_fpu_swa (int fp_fault, struct pt_
if ((last.count & 15) < 5 && (ia64_fetchadd(1, &last.count, acq) & 15) < 5) {
last.time = current_jiffies + 5 * HZ;
printk(KERN_WARNING
- "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
- current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
+ "%s(%d[#%u]): floating-point assist fault at ip %016lx, isr %016lx\n",
+ current->comm, task_pid_nr(current), current->xid,
+ regs->cr_iip + ia64_psr(regs)->ri, isr);
}
}
}
--- a/arch/ia64/mm/fault.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/ia64/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -10,6 +10,7 @@
#include <linux/interrupt.h>
#include <linux/kprobes.h>
#include <linux/kdebug.h>
+#include <linux/vs_memory.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
--- a/arch/m32r/kernel/traps.c 2008-04-17 11:31:23.000000000 -0400
+++ a/arch/m32r/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -195,8 +195,9 @@ static void show_registers(struct pt_reg
} else {
printk("SPI: %08lx\n", sp);
}
- printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
- current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
+ printk("Process %s (pid: %d[#%u], process nr: %d, stackpage=%08lx)",
+ current->comm, task_pid_nr(current), current->xid,
+ 0xffff & i, 4096+(unsigned long)current);
/*
* When in-kernel, we also print out the stack and code at the
--- a/arch/m68k/Kconfig 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/m68k/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -674,6 +674,8 @@ source "fs/Kconfig"
source "arch/m68k/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/m68k/kernel/ptrace.c 2008-04-17 11:31:23.000000000 -0400
+++ a/arch/m68k/kernel/ptrace.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,7 @@
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
+#include <linux/vs_base.h>
#include <asm/uaccess.h>
#include <asm/page.h>
@@ -269,6 +270,8 @@ long arch_ptrace(struct task_struct *chi
ret = ptrace_request(child, request, addr, data);
break;
}
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+ goto out_tsk;
return ret;
out_eio:
--- a/arch/m68k/kernel/traps.c 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/m68k/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -898,8 +898,8 @@ void show_registers(struct pt_regs *regs
printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
regs->d4, regs->d5, regs->a0, regs->a1);
- printk("Process %s (pid: %d, task=%p)\n",
- current->comm, task_pid_nr(current), current);
+ printk("Process %s (pid: %d[#%u], task=%p)\n",
+ current->comm, task_pid_nr(current), current->xid, current);
addr = (unsigned long)&fp->un;
printk("Frame format=%X ", regs->format);
switch (regs->format) {
--- a/arch/m68knommu/Kconfig 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/m68knommu/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -722,6 +722,8 @@ source "fs/Kconfig"
source "arch/m68knommu/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/m68knommu/kernel/traps.c 2008-04-17 10:37:14.000000000 -0400
+++ a/arch/m68knommu/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -78,8 +78,9 @@ void die_if_kernel(char *str, struct pt_
printk(KERN_EMERG "d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
fp->d4, fp->d5, fp->a0, fp->a1);
- printk(KERN_EMERG "Process %s (pid: %d, stackpage=%08lx)\n",
- current->comm, current->pid, PAGE_SIZE+(unsigned long)current);
+ printk(KERN_EMERG "Process %s (pid: %d[#%u], stackpage=%08lx)\n",
+ current->comm, task_pid_nr(current), current->xid,
+ PAGE_SIZE+(unsigned long)current);
show_stack(NULL, (unsigned long *)(fp + 1));
add_taint(TAINT_DIE);
do_exit(SIGSEGV);
--- a/arch/mips/Kconfig 2008-04-17 12:05:28.000000000 -0400
+++ a/arch/mips/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -2099,6 +2099,8 @@ source "fs/Kconfig"
source "arch/mips/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/mips/kernel/linux32.c 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/linux32.c 2008-04-19 15:14:52.000000000 -0400
@@ -209,7 +209,7 @@ sys32_gettimeofday(struct compat_timeval
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/mips/kernel/ptrace.c 2008-04-17 11:31:23.000000000 -0400
+++ a/arch/mips/kernel/ptrace.c 2008-04-19 15:14:52.000000000 -0400
@@ -25,6 +25,7 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
+#include <linux/vs_base.h>
#include <asm/byteorder.h>
#include <asm/cpu.h>
@@ -171,6 +172,9 @@ long arch_ptrace(struct task_struct *chi
{
int ret;
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+ goto out;
+
switch (request) {
/* when I and D space are separate, these will need to be fixed. */
case PTRACE_PEEKTEXT: /* read word at location addr. */
--- a/arch/mips/kernel/scall32-o32.S 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/scall32-o32.S 2008-04-19 15:14:52.000000000 -0400
@@ -619,7 +619,7 @@ einval: li v0, -EINVAL
sys sys_mq_timedreceive 5
sys sys_mq_notify 2 /* 4275 */
sys sys_mq_getsetattr 3
- sys sys_ni_syscall 0 /* sys_vserver */
+ sys sys_vserver 3
sys sys_waitid 5
sys sys_ni_syscall 0 /* available, was setaltroot */
sys sys_add_key 5 /* 4280 */
--- a/arch/mips/kernel/scall64-64.S 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/scall64-64.S 2008-04-19 15:14:52.000000000 -0400
@@ -434,7 +434,7 @@ sys_call_table:
PTR sys_mq_timedreceive
PTR sys_mq_notify
PTR sys_mq_getsetattr /* 5235 */
- PTR sys_ni_syscall /* sys_vserver */
+ PTR sys_vserver
PTR sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
--- a/arch/mips/kernel/scall64-n32.S 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/scall64-n32.S 2008-04-19 15:14:52.000000000 -0400
@@ -360,7 +360,7 @@ EXPORT(sysn32_call_table)
PTR compat_sys_mq_timedreceive
PTR compat_sys_mq_notify
PTR compat_sys_mq_getsetattr
- PTR sys_ni_syscall /* 6240, sys_vserver */
+ PTR sys32_vserver /* 6240 */
PTR compat_sys_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
--- a/arch/mips/kernel/scall64-o32.S 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/scall64-o32.S 2008-04-19 15:14:52.000000000 -0400
@@ -482,7 +482,7 @@ sys_call_table:
PTR compat_sys_mq_timedreceive
PTR compat_sys_mq_notify /* 4275 */
PTR compat_sys_mq_getsetattr
- PTR sys_ni_syscall /* sys_vserver */
+ PTR sys32_vserver
PTR sys32_waitid
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
--- a/arch/mips/kernel/traps.c 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/mips/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -313,8 +313,9 @@ void show_registers(const struct pt_regs
{
__show_regs(regs);
print_modules();
- printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
- current->comm, task_pid_nr(current), current_thread_info(), current);
+ printk("Process %s (pid: %d:#%u, threadinfo=%p, task=%p)\n",
+ current->comm, task_pid_nr(current), current->xid,
+ current_thread_info(), current);
show_stacktrace(current, regs);
show_code((unsigned int __user *) regs->cp0_epc);
printk("\n");
--- a/arch/mips/mm/fault.c 2008-04-17 11:31:24.000000000 -0400
+++ a/arch/mips/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -178,7 +178,8 @@ out_of_memory:
down_read(&mm->mmap_sem);
goto survive;
}
- printk("VM: killing process %s\n", tsk->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ tsk->comm, tsk->pid, tsk->xid);
if (user_mode(regs))
do_group_exit(SIGKILL);
goto no_context;
--- a/arch/parisc/Kconfig 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/parisc/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -278,6 +278,8 @@ source "fs/Kconfig"
source "arch/parisc/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/parisc/kernel/syscall_table.S 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/parisc/kernel/syscall_table.S 2008-04-19 15:14:52.000000000 -0400
@@ -361,7 +361,7 @@
ENTRY_COMP(mbind) /* 260 */
ENTRY_COMP(get_mempolicy)
ENTRY_COMP(set_mempolicy)
- ENTRY_SAME(ni_syscall) /* 263: reserved for vserver */
+ ENTRY_DIFF(vserver)
ENTRY_SAME(add_key)
ENTRY_SAME(request_key) /* 265 */
ENTRY_SAME(keyctl)
--- a/arch/parisc/kernel/sys_parisc32.c 2008-04-17 11:31:24.000000000 -0400
+++ a/arch/parisc/kernel/sys_parisc32.c 2008-04-19 15:14:52.000000000 -0400
@@ -204,11 +204,11 @@ static inline long get_ts32(struct times
asmlinkage int
sys32_gettimeofday(struct compat_timeval __user *tv, struct timezone __user *tz)
{
- extern void do_gettimeofday(struct timeval *tv);
+ extern void vx_gettimeofday(struct timeval *tv);
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_compat_timeval(tv, &ktv))
return -EFAULT;
}
--- a/arch/parisc/kernel/traps.c 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/parisc/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -237,8 +237,9 @@ void die_if_kernel(char *str, struct pt_
if (err == 0)
return; /* STFU */
- printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
- current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
+ printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld) at " RFMT "\n",
+ current->comm, task_pid_nr(current), current->xid,
+ str, err, regs->iaoq[0]);
#ifdef PRINT_USER_FAULTS
/* XXX for debugging only */
show_regs(regs);
@@ -270,8 +271,8 @@ KERN_CRIT " || |
pdc_console_restart();
if (err)
- printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
- current->comm, task_pid_nr(current), str, err);
+ printk(KERN_CRIT "%s (pid %d:#%u): %s (code %ld)\n",
+ current->comm, task_pid_nr(current), current->xid, str, err);
/* Wot's wrong wif bein' racy? */
if (current->thread.flags & PARISC_KERNEL_DEATH) {
--- a/arch/parisc/mm/fault.c 2008-04-17 11:31:24.000000000 -0400
+++ a/arch/parisc/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -210,8 +210,9 @@ bad_area:
#ifdef PRINT_USER_FAULTS
printk(KERN_DEBUG "\n");
- printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
- task_pid_nr(tsk), tsk->comm, code, address);
+ printk(KERN_DEBUG "do_page_fault() pid=%d:#%u "
+ "command='%s' type=%lu address=0x%08lx\n",
+ task_pid_nr(tsk), tsk->xid, tsk->comm, code, address);
if (vma) {
printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
vma->vm_start, vma->vm_end);
@@ -261,7 +262,8 @@ no_context:
out_of_memory:
up_read(&mm->mmap_sem);
- printk(KERN_CRIT "VM: killing process %s\n", current->comm);
+ printk(KERN_CRIT "VM: killing process %s(%d:#%u)\n",
+ current->comm, current->pid, current->xid);
if (user_mode(regs))
do_group_exit(SIGKILL);
goto no_context;
--- a/arch/powerpc/Kconfig 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/powerpc/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -706,6 +706,8 @@ source "lib/Kconfig"
source "arch/powerpc/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
config KEYS_COMPAT
--- a/arch/powerpc/kernel/irq.c 2008-04-17 12:05:29.000000000 -0400
+++ a/arch/powerpc/kernel/irq.c 2008-04-19 15:14:52.000000000 -0400
@@ -53,6 +53,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
+#include <linux/vs_context.h>
#include <asm/uaccess.h>
#include <asm/system.h>
--- a/arch/powerpc/kernel/process.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/powerpc/kernel/process.c 2008-04-19 15:14:52.000000000 -0400
@@ -464,8 +464,9 @@ void show_regs(struct pt_regs * regs)
#else
printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
#endif
- printk("TASK = %p[%d] '%s' THREAD: %p",
- current, task_pid_nr(current), current->comm, task_thread_info(current));
+ printk("TASK = %p[%d,#%u] '%s' THREAD: %p",
+ current, task_pid_nr(current), current->xid,
+ current->comm, task_thread_info(current));
#ifdef CONFIG_SMP
printk(" CPU: %d", raw_smp_processor_id());
--- a/arch/powerpc/kernel/sys_ppc32.c 2008-04-17 11:31:24.000000000 -0400
+++ a/arch/powerpc/kernel/sys_ppc32.c 2008-04-19 15:14:52.000000000 -0400
@@ -205,7 +205,7 @@ asmlinkage long compat_sys_gettimeofday(
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/powerpc/kernel/traps.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/powerpc/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -941,8 +941,9 @@ void nonrecoverable_exception(struct pt_
void trace_syscall(struct pt_regs *regs)
{
- printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
- current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
+ printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
+ current, task_pid_nr(current), current->xid,
+ regs->nip, regs->link, regs->gpr[0],
regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
--- a/arch/powerpc/kernel/vdso.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/powerpc/kernel/vdso.c 2008-04-19 15:14:52.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
+#include <linux/vs_memory.h>
#include <asm/pgtable.h>
#include <asm/system.h>
--- a/arch/powerpc/mm/fault.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/powerpc/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -378,7 +378,8 @@ out_of_memory:
down_read(&mm->mmap_sem);
goto survive;
}
- printk("VM: killing process %s\n", current->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ current->comm, current->pid, current->xid);
if (user_mode(regs))
do_group_exit(SIGKILL);
return SIGKILL;
--- a/arch/ppc/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/ppc/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -1261,6 +1261,8 @@ source "lib/Kconfig"
source "arch/ppc/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/ppc/kernel/traps.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/ppc/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -669,8 +669,9 @@ void nonrecoverable_exception(struct pt_
void trace_syscall(struct pt_regs *regs)
{
- printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
- current, current->pid, regs->nip, regs->link, regs->gpr[0],
+ printk("Task: %p(%d[#%u]), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
+ current, current->pid, current->xid,
+ regs->nip, regs->link, regs->gpr[0],
regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
}
--- a/arch/ppc/mm/fault.c 2008-04-17 11:31:25.000000000 -0400
+++ a/arch/ppc/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -295,7 +295,8 @@ out_of_memory:
down_read(&mm->mmap_sem);
goto survive;
}
- printk("VM: killing process %s\n", current->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ current->comm, current->pid, current->xid);
if (user_mode(regs))
do_group_exit(SIGKILL);
return SIGKILL;
--- a/arch/s390/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/s390/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -544,6 +544,8 @@ source "fs/Kconfig"
source "arch/s390/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/s390/kernel/compat_linux.c 2008-04-17 11:31:25.000000000 -0400
+++ a/arch/s390/kernel/compat_linux.c 2008-04-19 15:14:52.000000000 -0400
@@ -567,7 +567,7 @@ asmlinkage long sys32_gettimeofday(struc
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/s390/kernel/process.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/s390/kernel/process.c 2008-04-21 11:09:01.000000000 -0400
@@ -194,9 +194,9 @@ void show_regs(struct pt_regs *regs)
init_utsname()->release,
(int)strcspn(init_utsname()->version, " "),
init_utsname()->version);
- printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
- current->comm, current->pid, current,
- (void *) current->thread.ksp);
+ printk("Process %s (pid: %d[#%u], task: %p, ksp: %p)\n",
+ current->comm, current->pid, current->xid,
+ (void *) current, (void *) current->thread.ksp);
show_registers(regs);
/* Show stack backtrace if pt_regs is from kernel mode */
if (!(regs->psw.mask & PSW_MASK_PSTATE))
--- a/arch/s390/kernel/ptrace.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/s390/kernel/ptrace.c 2008-04-19 15:14:52.000000000 -0400
@@ -33,6 +33,7 @@
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/signal.h>
+#include <linux/vs_base.h>
#include <asm/segment.h>
#include <asm/page.h>
@@ -710,7 +711,13 @@ sys_ptrace(long request, long pid, long
goto out;
}
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) {
+ ret = -EPERM;
+ goto out_tsk;
+ }
+
ret = do_ptrace(child, request, addr, data);
+out_tsk:
put_task_struct(child);
out:
unlock_kernel();
--- a/arch/s390/kernel/syscalls.S 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/s390/kernel/syscalls.S 2008-04-19 15:14:52.000000000 -0400
@@ -271,7 +271,7 @@ SYSCALL(sys_clock_settime,sys_clock_sett
SYSCALL(sys_clock_gettime,sys_clock_gettime,sys32_clock_gettime_wrapper) /* 260 */
SYSCALL(sys_clock_getres,sys_clock_getres,sys32_clock_getres_wrapper)
SYSCALL(sys_clock_nanosleep,sys_clock_nanosleep,sys32_clock_nanosleep_wrapper)
-NI_SYSCALL /* reserved for vserver */
+SYSCALL(sys_vserver,sys_vserver,sys32_vserver)
SYSCALL(s390_fadvise64_64,sys_ni_syscall,sys32_fadvise64_64_wrapper)
SYSCALL(sys_statfs64,sys_statfs64,compat_sys_statfs64_wrapper)
SYSCALL(sys_fstatfs64,sys_fstatfs64,compat_sys_fstatfs64_wrapper)
--- a/arch/s390/mm/fault.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/s390/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -217,7 +217,8 @@ static int do_out_of_memory(struct pt_re
down_read(&mm->mmap_sem);
return 1;
}
- printk("VM: killing process %s\n", tsk->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ tsk->comm, tsk->pid, tsk->xid);
if (regs->psw.mask & PSW_MASK_PSTATE)
do_group_exit(SIGKILL);
do_no_context(regs, error_code, address);
--- a/arch/sh/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sh/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -913,6 +913,8 @@ source "fs/Kconfig"
source "arch/sh/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/sh/kernel/irq.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sh/kernel/irq.c 2008-04-19 15:14:52.000000000 -0400
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
+#include <linux/vs_context.h>
#include <asm/processor.h>
#include <asm/machvec.h>
#include <asm/uaccess.h>
--- a/arch/sh/kernel/vsyscall/vsyscall.c 2008-04-17 10:37:14.000000000 -0400
+++ a/arch/sh/kernel/vsyscall/vsyscall.c 2008-04-19 15:14:52.000000000 -0400
@@ -19,6 +19,7 @@
#include <linux/elf.h>
#include <linux/sched.h>
#include <linux/err.h>
+#include <linux/vs_memory.h>
/*
* Should the kernel map a VDSO page into processes and pass its
--- a/arch/sparc/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -330,6 +330,8 @@ source "fs/Kconfig"
source "arch/sparc/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/sparc/kernel/ptrace.c 2008-05-21 14:30:05.000000000 -0400
+++ a/arch/sparc/kernel/ptrace.c 2008-05-21 14:30:40.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/signal.h>
#include <linux/regset.h>
#include <linux/elf.h>
+#include <linux/vs_base.h>
#include <asm/pgtable.h>
#include <asm/system.h>
@@ -270,6 +271,10 @@ static int fpregs32_set(struct task_stru
33 * sizeof(u32),
34 * sizeof(u32));
}
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) {
+ pt_error_return(regs, ESRCH);
+ goto out_tsk;
+ }
if (!ret)
ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
--- a/arch/sparc/kernel/systbls.S 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc/kernel/systbls.S 2008-04-19 15:14:52.000000000 -0400
@@ -70,7 +70,7 @@ sys_call_table:
/*250*/ .long sparc_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
/*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
-/*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy
+/*265*/ .long sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
/*270*/ .long sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
/*275*/ .long sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/ .long sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
--- a/arch/sparc/kernel/traps.c 2008-04-17 11:31:25.000000000 -0400
+++ a/arch/sparc/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -99,7 +99,8 @@ void die_if_kernel(char *str, struct pt_
" /_| \\__/ |_\\\n"
" \\__U_/\n");
- printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
+ printk("%s(%d[#%u]): %s [#%d]\n", current->comm,
+ task_pid_nr(current), current->xid, str, ++die_counter);
show_regs(regs);
add_taint(TAINT_DIE);
--- a/arch/sparc/mm/fault.c 2008-04-17 11:31:25.000000000 -0400
+++ a/arch/sparc/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -367,7 +367,8 @@ no_context:
*/
out_of_memory:
up_read(&mm->mmap_sem);
- printk("VM: killing process %s\n", tsk->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ tsk->comm, tsk->pid, tsk->xid);
if (from_user)
do_group_exit(SIGKILL);
goto no_context;
--- a/arch/sparc64/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -471,6 +471,8 @@ source "fs/Kconfig"
source "arch/sparc64/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/sparc64/kernel/binfmt_aout32.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/kernel/binfmt_aout32.c 2008-04-19 15:14:52.000000000 -0400
@@ -27,6 +27,7 @@
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
+#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
--- a/arch/sparc64/kernel/ptrace.c 2008-05-21 14:30:05.000000000 -0400
+++ a/arch/sparc64/kernel/ptrace.c 2008-05-21 14:30:40.000000000 -0400
@@ -25,6 +25,7 @@
#include <linux/regset.h>
#include <linux/compat.h>
#include <linux/elf.h>
+#include <linux/vs_base.h>
#include <asm/asi.h>
#include <asm/pgtable.h>
@@ -222,6 +223,10 @@ static int genregs64_get(struct task_str
16 * sizeof(u64),
32 * sizeof(u64));
}
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT)) {
+ pt_error_return(regs, ESRCH);
+ goto out_tsk;
+ }
if (!ret) {
/* TSTATE, TPC, TNPC */
--- a/arch/sparc64/kernel/sys_sparc32.c 2008-05-21 14:30:05.000000000 -0400
+++ a/arch/sparc64/kernel/sys_sparc32.c 2008-05-21 14:30:40.000000000 -0400
@@ -722,7 +722,7 @@ asmlinkage long sys32_gettimeofday(struc
{
if (tv) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (put_tv32(tv, &ktv))
return -EFAULT;
}
--- a/arch/sparc64/kernel/systbls.S 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/kernel/systbls.S 2008-04-19 15:14:52.000000000 -0400
@@ -71,7 +71,7 @@ sys_call_table32:
/*250*/ .word sys32_mremap, sys32_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl
.word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep
/*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy
+ .word sys_timer_delete, compat_sys_timer_create, sys32_vserver, compat_sys_io_setup, sys_io_destroy
/*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink
.word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid
/*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat
@@ -144,7 +144,7 @@ sys_call_table:
/*250*/ .word sys64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl
.word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep
/*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun
- .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy
+ .word sys_timer_delete, sys_timer_create, sys_vserver, sys_io_setup, sys_io_destroy
/*270*/ .word sys_io_submit, sys_io_cancel, sys_io_getevents, sys_mq_open, sys_mq_unlink
.word sys_mq_timedsend, sys_mq_timedreceive, sys_mq_notify, sys_mq_getsetattr, sys_waitid
/*280*/ .word sys_tee, sys_add_key, sys_request_key, sys_keyctl, sys_openat
--- a/arch/sparc64/kernel/traps.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/kernel/traps.c 2008-04-19 15:14:52.000000000 -0400
@@ -2183,7 +2183,8 @@ void die_if_kernel(char *str, struct pt_
" /_| \\__/ |_\\\n"
" \\__U_/\n");
- printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
+ printk("%s(%d[#%u]): %s [#%d]\n", current->comm,
+ task_pid_nr(current), current->xid, str, ++die_counter);
notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
__asm__ __volatile__("flushw");
__show_regs(regs);
--- a/arch/sparc64/mm/fault.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/mm/fault.c 2008-04-19 15:14:52.000000000 -0400
@@ -453,7 +453,8 @@ handle_kernel_fault:
out_of_memory:
insn = get_fault_insn(regs, insn);
up_read(&mm->mmap_sem);
- printk("VM: killing process %s\n", current->comm);
+ printk("VM: killing process %s(%d:#%u)\n",
+ current->comm, current->pid, current->xid);
if (!(regs->tstate & TSTATE_PRIV))
do_group_exit(SIGKILL);
goto handle_kernel_fault;
--- a/arch/sparc64/solaris/fs.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/sparc64/solaris/fs.c 2008-04-19 15:14:52.000000000 -0400
@@ -368,7 +368,7 @@ static int report_statvfs(struct vfsmoun
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode)) i = 1;
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
@@ -404,7 +404,7 @@ static int report_statvfs64(struct vfsmo
int j = strlen (p);
if (j > 15) j = 15;
- if (IS_RDONLY(inode)) i = 1;
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt)) i = 1;
if (mnt->mnt_flags & MNT_NOSUID) i |= 2;
if (!sysv_valid_dev(inode->i_sb->s_dev))
return -EOVERFLOW;
--- a/arch/um/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/um/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -245,6 +245,8 @@ source "drivers/connector/Kconfig"
source "fs/Kconfig"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/um/kernel/trap.c 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/um/kernel/trap.c 2008-04-19 15:14:52.000000000 -0400
@@ -215,7 +215,8 @@ unsigned long segv(struct faultinfo fi,
current->thread.arch.faultinfo = fi;
force_sig_info(SIGBUS, &si, current);
} else if (err == -ENOMEM) {
- printk(KERN_INFO "VM: killing process %s\n", current->comm);
+ printk(KERN_INFO "VM: killing process %s(%d:#%u)\n",
+ current->comm, task_pid_nr(current), current->xid);
do_exit(SIGKILL);
} else {
BUG_ON(err != -EFAULT);
--- a/arch/v850/Kconfig 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/v850/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -344,6 +344,8 @@ source "drivers/usb/Kconfig"
source "arch/v850/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/v850/kernel/ptrace.c 2008-04-17 10:37:14.000000000 -0400
+++ a/arch/v850/kernel/ptrace.c 2008-04-19 15:14:52.000000000 -0400
@@ -23,6 +23,7 @@
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/signal.h>
+#include <linux/vs_base.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
@@ -116,6 +117,9 @@ long arch_ptrace(struct task_struct *chi
{
int rval;
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+ goto out;
+
switch (request) {
unsigned long val;
--- a/arch/x86/ia32/ia32entry.S 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/x86/ia32/ia32entry.S 2008-04-22 20:07:49.000000000 -0400
@@ -673,7 +673,7 @@ ia32_sys_call_table:
.quad sys_tgkill /* 270 */
.quad compat_sys_utimes
.quad sys32_fadvise64_64
- .quad quiet_ni_syscall /* sys_vserver */
+ .quad sys32_vserver
.quad sys_mbind
.quad compat_sys_get_mempolicy /* 275 */
.quad sys_set_mempolicy
--- a/arch/x86/Kconfig 2008-05-21 14:30:05.000000000 -0400
+++ a/arch/x86/Kconfig 2008-05-21 14:30:40.000000000 -0400
@@ -1623,6 +1623,8 @@ source "fs/Kconfig"
source "arch/x86/Kconfig.debug"
+source "kernel/vserver/Kconfig"
+
source "security/Kconfig"
source "crypto/Kconfig"
--- a/arch/x86/kernel/syscall_table_32.S 2008-04-17 12:05:30.000000000 -0400
+++ a/arch/x86/kernel/syscall_table_32.S 2008-04-19 15:14:52.000000000 -0400
@@ -272,7 +272,7 @@ ENTRY(sys_call_table)
.long sys_tgkill /* 270 */
.long sys_utimes
.long sys_fadvise64_64
- .long sys_ni_syscall /* sys_vserver */
+ .long sys_vserver
.long sys_mbind
.long sys_get_mempolicy
.long sys_set_mempolicy
--- a/Documentation/vserver/debug.txt 1969-12-31 19:00:00.000000000 -0500
+++ a/Documentation/vserver/debug.txt 2008-04-19 15:14:51.000000000 -0400
@@ -0,0 +1,154 @@
+
+debug_cvirt:
+
+ 2 4 "vx_map_tgid: %p/%llx: %d -> %d"
+ "vx_rmap_tgid: %p/%llx: %d -> %d"
+
+debug_dlim:
+
+ 0 1 "ALLOC (%p,#%d)%c inode (%d)"
+ "FREE (%p,#%d)%c inode"
+ 1 2 "ALLOC (%p,#%d)%c %lld bytes (%d)"
+ "FREE (%p,#%d)%c %lld bytes"
+ 2 4 "ADJUST: %lld,%lld on %ld,%ld [mult=%d]"
+ 3 8 "ext3_has_free_blocks(%p): %lu<%lu+1, %c, %u!=%u r=%d"
+ "ext3_has_free_blocks(%p): free=%lu, root=%lu"
+ "rcu_free_dl_info(%p)"
+ 4 10 "alloc_dl_info(%p,%d) = %p"
+ "dealloc_dl_info(%p)"
+ "get_dl_info(%p[#%d.%d])"
+ "put_dl_info(%p[#%d.%d])"
+ 5 20 "alloc_dl_info(%p,%d)*"
+ 6 40 "__hash_dl_info: %p[#%d]"
+ "__unhash_dl_info: %p[#%d]"
+ 7 80 "locate_dl_info(%p,#%d) = %p"
+
+debug_misc:
+
+ 0 1 "destroy_dqhash: %p [#0x%08x] c=%d"
+ "new_dqhash: %p [#0x%08x]"
+ "vroot[%d]_clr_dev: dev=%p[%lu,%d:%d]"
+ "vroot[%d]_get_real_bdev: dev=%p[%lu,%d:%d]"
+ "vroot[%d]_set_dev: dev=%p[%lu,%d:%d]"
+ "vroot_get_real_bdev not set"
+ 1 2 "cow_break_link(<28>%s<>)"
+ "temp copy <20>%s<>"
+ 2 4 "dentry_open(new): %p"
+ "dentry_open(old): %p"
+ "lookup_create(new): %p"
+ "old path <20>%s<>"
+ "path_lookup(old): %d"
+ "vfs_create(new): %d"
+ "vfs_rename: %d"
+ "vfs_sendfile: %d"
+ 3 8 "fput(new_file=%p[#%d])"
+ "fput(old_file=%p[#%d])"
+ 4 10 "vx_info_kill(%p[#%d],%d,%d) = %d"
+ "vx_info_kill(%p[#%d],%d,%d)*"
+ 5 20 "vs_reboot(%p[#%d],%d)"
+ 6 40 "dropping task %p[#%u,%u] for %p[#%u,%u]"
+
+debug_net:
+
+ 2 4 "nx_addr_conflict(%p,%p) %d.%d,%d.%d"
+ 3 8 "inet_bind(%p) %d.%d.%d.%d, %d.%d.%d.%d, %d.%d.%d.%d"
+ "inet_bind(%p)* %p,%p;%lx %d.%d.%d.%d"
+ 4 10 "ip_route_connect(%p) %p,%p;%lx"
+ 5 20 "__addr_in_socket(%p,%d.%d.%d.%d) %p:%d.%d.%d.%d %p;%lx"
+ 6 40 "sk,egf: %p [#%d] (from %d)"
+ "sk,egn: %p [#%d] (from %d)"
+ "sk,req: %p [#%d] (from %d)"
+ "sk: %p [#%d] (from %d)"
+ "tw: %p [#%d] (from %d)"
+ 7 80 "__sock_recvmsg: %p[%p,%p,%p;%d]:%d/%d"
+ "__sock_sendmsg: %p[%p,%p,%p;%d]:%d/%d"
+
+debug_nid:
+
+ 0 1 "__lookup_nx_info(#%u): %p[#%u]"
+ "alloc_nx_info(%d) = %p"
+ "create_nx_info(%d) (dynamic rejected)"
+ "create_nx_info(%d) = %p (already there)"
+ "create_nx_info(%d) = %p (new)"
+ "dealloc_nx_info(%p)"
+ 1 2 "alloc_nx_info(%d)*"
+ "create_nx_info(%d)*"
+ 2 4 "get_nx_info(%p[#%d.%d])"
+ "put_nx_info(%p[#%d.%d])"
+ 3 8 "claim_nx_info(%p[#%d.%d.%d]) %p"
+ "clr_nx_info(%p[#%d.%d])"
+ "init_nx_info(%p[#%d.%d])"
+ "release_nx_info(%p[#%d.%d.%d]) %p"
+ "set_nx_info(%p[#%d.%d])"
+ 4 10 "__hash_nx_info: %p[#%d]"
+ "__nx_dynamic_id: [#%d]"
+ "__unhash_nx_info: %p[#%d.%d.%d]"
+ 5 20 "moved task %p into nxi:%p[#%d]"
+ "nx_migrate_task(%p,%p[#%d.%d.%d])"
+ "task_get_nx_info(%p)"
+ 6 40 "nx_clear_persistent(%p[#%d])"
+
+debug_quota:
+
+ 0 1 "quota_sync_dqh(%p,%d) discard inode %p"
+ 1 2 "quota_sync_dqh(%p,%d)"
+ "sync_dquots(%p,%d)"
+ "sync_dquots_dqh(%p,%d)"
+ 3 8 "do_quotactl(%p,%d,cmd=%d,id=%d,%p)"
+
+debug_switch:
+
+ 0 1 "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]"
+ 1 2 "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]"
+ 4 10 "%s: (%s %s) returned %s with %d"
+
+debug_tag:
+
+ 7 80 "dx_parse_tag(<28>%s<>): %d:#%d"
+ "dx_propagate_tag(%p[#%lu.%d]): %d,%d"
+
+debug_xid:
+
+ 0 1 "__lookup_vx_info(#%u): %p[#%u]"
+ "alloc_vx_info(%d) = %p"
+ "alloc_vx_info(%d)*"
+ "create_vx_info(%d) (dynamic rejected)"
+ "create_vx_info(%d) = %p (already there)"
+ "create_vx_info(%d) = %p (new)"
+ "dealloc_vx_info(%p)"
+ "loc_vx_info(%d) = %p (found)"
+ "loc_vx_info(%d) = %p (new)"
+ "loc_vx_info(%d) = %p (not available)"
+ 1 2 "create_vx_info(%d)*"
+ "loc_vx_info(%d)*"
+ 2 4 "get_vx_info(%p[#%d.%d])"
+ "put_vx_info(%p[#%d.%d])"
+ 3 8 "claim_vx_info(%p[#%d.%d.%d]) %p"
+ "clr_vx_info(%p[#%d.%d])"
+ "init_vx_info(%p[#%d.%d])"
+ "release_vx_info(%p[#%d.%d.%d]) %p"
+ "set_vx_info(%p[#%d.%d])"
+ 4 10 "__hash_vx_info: %p[#%d]"
+ "__unhash_vx_info: %p[#%d.%d.%d]"
+ "__vx_dynamic_id: [#%d]"
+ 5 20 "enter_vx_info(%p[#%d],%p) %p[#%d,%p]"
+ "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]"
+ "moved task %p into vxi:%p[#%d]"
+ "task_get_vx_info(%p)"
+ "vx_migrate_task(%p,%p[#%d.%d])"
+ 6 40 "vx_clear_persistent(%p[#%d])"
+ "vx_exit_init(%p[#%d],%p[#%d,%d,%d])"
+ "vx_set_init(%p[#%d],%p[#%d,%d,%d])"
+ "vx_set_persistent(%p[#%d])"
+ "vx_set_reaper(%p[#%d],%p[#%d,%d])"
+ 7 80 "vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]"
+
+
+debug_limit:
+
+ n 2^n "vx_acc_cres[%5d,%s,%2d]: %5d%s"
+ "vx_cres_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
+
+ m 2^m "vx_acc_page[%5d,%s,%2d]: %5d%s"
+ "vx_acc_pages[%5d,%s,%2d]: %5d += %5d"
+ "vx_pages_avail[%5d,%s,%2d]: %5ld > %5d + %5d"
--- a/drivers/block/Kconfig 2008-04-17 12:05:31.000000000 -0400
+++ a/drivers/block/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -263,6 +263,13 @@ config BLK_DEV_CRYPTOLOOP
instead, which can be configured to be on-disk compatible with the
cryptoloop device.
+config BLK_DEV_VROOT
+ tristate "Virtual Root device support"
+ depends on QUOTACTL
+ ---help---
+ Saying Y here will allow you to use quota/fs ioctls on a shared
+ partition within a virtual server without compromising security.
+
config BLK_DEV_NBD
tristate "Network block device support"
depends on NET
--- a/drivers/block/loop.c 2008-04-17 12:05:32.000000000 -0400
+++ a/drivers/block/loop.c 2008-04-19 15:14:52.000000000 -0400
@@ -76,6 +76,7 @@
#include <linux/gfp.h>
#include <linux/kthread.h>
#include <linux/splice.h>
+#include <linux/vs_context.h>
#include <asm/uaccess.h>
@@ -789,6 +790,7 @@ static int loop_set_fd(struct loop_devic
lo->lo_blocksize = lo_blocksize;
lo->lo_device = bdev;
lo->lo_flags = lo_flags;
+ lo->lo_xid = vx_current_xid();
lo->lo_backing_file = file;
lo->transfer = transfer_none;
lo->ioctl = NULL;
@@ -908,6 +910,7 @@ static int loop_clr_fd(struct loop_devic
lo->lo_encrypt_key_size = 0;
lo->lo_flags = 0;
lo->lo_thread = NULL;
+ lo->lo_xid = 0;
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
@@ -929,7 +932,7 @@ loop_set_status(struct loop_device *lo,
struct loop_func_table *xfer;
if (lo->lo_encrypt_key_size && lo->lo_key_owner != current->uid &&
- !capable(CAP_SYS_ADMIN))
+ !vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP))
return -EPERM;
if (lo->lo_state != Lo_bound)
return -ENXIO;
@@ -1013,7 +1016,8 @@ loop_get_status(struct loop_device *lo,
memcpy(info->lo_crypt_name, lo->lo_crypt_name, LO_NAME_SIZE);
info->lo_encrypt_type =
lo->lo_encryption ? lo->lo_encryption->number : 0;
- if (lo->lo_encrypt_key_size && capable(CAP_SYS_ADMIN)) {
+ if (lo->lo_encrypt_key_size &&
+ vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_CLOOP)) {
info->lo_encrypt_key_size = lo->lo_encrypt_key_size;
memcpy(info->lo_encrypt_key, lo->lo_encrypt_key,
lo->lo_encrypt_key_size);
@@ -1322,6 +1326,9 @@ static int lo_open(struct inode *inode,
{
struct loop_device *lo = inode->i_bdev->bd_disk->private_data;
+ if (!vx_check(lo->lo_xid, VS_IDENT|VS_HOSTID|VS_ADMIN_P))
+ return -EACCES;
+
mutex_lock(&lo->lo_ctl_mutex);
lo->lo_refcnt++;
mutex_unlock(&lo->lo_ctl_mutex);
--- a/drivers/block/Makefile 2008-04-17 12:05:31.000000000 -0400
+++ a/drivers/block/Makefile 2008-04-19 15:14:52.000000000 -0400
@@ -29,5 +29,6 @@ obj-$(CONFIG_VIRTIO_BLK) += virtio_blk.o
obj-$(CONFIG_VIODASD) += viodasd.o
obj-$(CONFIG_BLK_DEV_SX8) += sx8.o
obj-$(CONFIG_BLK_DEV_UB) += ub.o
+obj-$(CONFIG_BLK_DEV_VROOT) += vroot.o
obj-$(CONFIG_XEN_BLKDEV_FRONTEND) += xen-blkfront.o
--- a/drivers/block/vroot.c 1969-12-31 19:00:00.000000000 -0500
+++ a/drivers/block/vroot.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,280 @@
+/*
+ * linux/drivers/block/vroot.c
+ *
+ * written by Herbert P<>tzl, 9/11/2002
+ * ported to 2.6.10 by Herbert P<>tzl, 30/12/2004
+ *
+ * based on the loop.c code by Theodore Ts'o.
+ *
+ * Copyright (C) 2002-2007 by Herbert P<>tzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/file.h>
+#include <linux/major.h>
+#include <linux/blkdev.h>
+
+#include <linux/vroot.h>
+#include <linux/vs_context.h>
+
+
+static int max_vroot = 8;
+
+static struct vroot_device *vroot_dev;
+static struct gendisk **disks;
+
+
+static int vroot_set_dev(
+ struct vroot_device *vr,
+ struct file *vr_file,
+ struct block_device *bdev,
+ unsigned int arg)
+{
+ struct block_device *real_bdev;
+ struct file *file;
+ struct inode *inode;
+ int error;
+
+ error = -EBUSY;
+ if (vr->vr_state != Vr_unbound)
+ goto out;
+
+ error = -EBADF;
+ file = fget(arg);
+ if (!file)
+ goto out;
+
+ error = -EINVAL;
+ inode = file->f_dentry->d_inode;
+
+
+ if (S_ISBLK(inode->i_mode)) {
+ real_bdev = inode->i_bdev;
+ vr->vr_device = real_bdev;
+ __iget(real_bdev->bd_inode);
+ } else
+ goto out_fput;
+
+ vxdprintk(VXD_CBIT(misc, 0),
+ "vroot[%d]_set_dev: dev=" VXF_DEV,
+ vr->vr_number, VXD_DEV(real_bdev));
+
+ vr->vr_state = Vr_bound;
+ error = 0;
+
+ out_fput:
+ fput(file);
+ out:
+ return error;
+}
+
+static int vroot_clr_dev(
+ struct vroot_device *vr,
+ struct file *vr_file,
+ struct block_device *bdev)
+{
+ struct block_device *real_bdev;
+
+ if (vr->vr_state != Vr_bound)
+ return -ENXIO;
+ if (vr->vr_refcnt > 1) /* we needed one fd for the ioctl */
+ return -EBUSY;
+
+ real_bdev = vr->vr_device;
+
+ vxdprintk(VXD_CBIT(misc, 0),
+ "vroot[%d]_clr_dev: dev=" VXF_DEV,
+ vr->vr_number, VXD_DEV(real_bdev));
+
+ bdput(real_bdev);
+ vr->vr_state = Vr_unbound;
+ vr->vr_device = NULL;
+ return 0;
+}
+
+
+static int vr_ioctl(struct inode *inode, struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct vroot_device *vr = inode->i_bdev->bd_disk->private_data;
+ int err;
+
+ down(&vr->vr_ctl_mutex);
+ switch (cmd) {
+ case VROOT_SET_DEV:
+ err = vroot_set_dev(vr, file, inode->i_bdev, arg);
+ break;
+ case VROOT_CLR_DEV:
+ err = vroot_clr_dev(vr, file, inode->i_bdev);
+ break;
+ default:
+ err = -EINVAL;
+ break;
+ }
+ up(&vr->vr_ctl_mutex);
+ return err;
+}
+
+static int vr_open(struct inode *inode, struct file *file)
+{
+ struct vroot_device *vr = inode->i_bdev->bd_disk->private_data;
+
+ down(&vr->vr_ctl_mutex);
+ vr->vr_refcnt++;
+ up(&vr->vr_ctl_mutex);
+ return 0;
+}
+
+static int vr_release(struct inode *inode, struct file *file)
+{
+ struct vroot_device *vr = inode->i_bdev->bd_disk->private_data;
+
+ down(&vr->vr_ctl_mutex);
+ --vr->vr_refcnt;
+ up(&vr->vr_ctl_mutex);
+ return 0;
+}
+
+static struct block_device_operations vr_fops = {
+ .owner = THIS_MODULE,
+ .open = vr_open,
+ .release = vr_release,
+ .ioctl = vr_ioctl,
+};
+
+struct block_device *__vroot_get_real_bdev(struct block_device *bdev)
+{
+ struct inode *inode = bdev->bd_inode;
+ struct vroot_device *vr;
+ struct block_device *real_bdev;
+ int minor = iminor(inode);
+
+ vr = &vroot_dev[minor];
+ real_bdev = vr->vr_device;
+
+ vxdprintk(VXD_CBIT(misc, 0),
+ "vroot[%d]_get_real_bdev: dev=" VXF_DEV,
+ vr->vr_number, VXD_DEV(real_bdev));
+
+ if (vr->vr_state != Vr_bound)
+ return ERR_PTR(-ENXIO);
+
+ __iget(real_bdev->bd_inode);
+ return real_bdev;
+}
+
+/*
+ * And now the modules code and kernel interface.
+ */
+
+module_param(max_vroot, int, 0);
+
+MODULE_PARM_DESC(max_vroot, "Maximum number of vroot devices (1-256)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_BLOCKDEV_MAJOR(VROOT_MAJOR);
+
+MODULE_AUTHOR ("Herbert P<>tzl");
+MODULE_DESCRIPTION ("Virtual Root Device Mapper");
+
+
+int __init vroot_init(void)
+{
+ int err, i;
+
+ if (max_vroot < 1 || max_vroot > 256) {
+ max_vroot = MAX_VROOT_DEFAULT;
+ printk(KERN_WARNING "vroot: invalid max_vroot "
+ "(must be between 1 and 256), "
+ "using default (%d)\n", max_vroot);
+ }
+
+ if (register_blkdev(VROOT_MAJOR, "vroot"))
+ return -EIO;
+
+ err = -ENOMEM;
+ vroot_dev = kmalloc(max_vroot * sizeof(struct vroot_device), GFP_KERNEL);
+ if (!vroot_dev)
+ goto out_mem1;
+ memset(vroot_dev, 0, max_vroot * sizeof(struct vroot_device));
+
+ disks = kmalloc(max_vroot * sizeof(struct gendisk *), GFP_KERNEL);
+ if (!disks)
+ goto out_mem2;
+
+ for (i = 0; i < max_vroot; i++) {
+ disks[i] = alloc_disk(1);
+ if (!disks[i])
+ goto out_mem3;
+ }
+
+ for (i = 0; i < max_vroot; i++) {
+ struct vroot_device *vr = &vroot_dev[i];
+ struct gendisk *disk = disks[i];
+
+ memset(vr, 0, sizeof(*vr));
+ init_MUTEX(&vr->vr_ctl_mutex);
+ vr->vr_number = i;
+ disk->major = VROOT_MAJOR;
+ disk->first_minor = i;
+ disk->fops = &vr_fops;
+ sprintf(disk->disk_name, "vroot%d", i);
+ disk->private_data = vr;
+ }
+
+ err = register_vroot_grb(&__vroot_get_real_bdev);
+ if (err)
+ goto out_mem3;
+
+ for (i = 0; i < max_vroot; i++)
+ add_disk(disks[i]);
+ printk(KERN_INFO "vroot: loaded (max %d devices)\n", max_vroot);
+ return 0;
+
+out_mem3:
+ while (i--)
+ put_disk(disks[i]);
+ kfree(disks);
+out_mem2:
+ kfree(vroot_dev);
+out_mem1:
+ unregister_blkdev(VROOT_MAJOR, "vroot");
+ printk(KERN_ERR "vroot: ran out of memory\n");
+ return err;
+}
+
+void vroot_exit(void)
+{
+ int i;
+
+ if (unregister_vroot_grb(&__vroot_get_real_bdev))
+ printk(KERN_WARNING "vroot: cannot unregister grb\n");
+
+ for (i = 0; i < max_vroot; i++) {
+ del_gendisk(disks[i]);
+ put_disk(disks[i]);
+ }
+ unregister_blkdev(VROOT_MAJOR, "vroot");
+
+ kfree(disks);
+ kfree(vroot_dev);
+}
+
+module_init(vroot_init);
+module_exit(vroot_exit);
+
+#ifndef MODULE
+
+static int __init max_vroot_setup(char *str)
+{
+ max_vroot = simple_strtol(str, NULL, 0);
+ return 1;
+}
+
+__setup("max_vroot=", max_vroot_setup);
+
+#endif
+
--- a/drivers/char/sysrq.c 2008-04-17 11:31:27.000000000 -0400
+++ a/drivers/char/sysrq.c 2008-04-19 15:14:52.000000000 -0400
@@ -37,6 +37,7 @@
#include <linux/irq.h>
#include <linux/hrtimer.h>
#include <linux/oom.h>
+#include <linux/vserver/debug.h>
#include <asm/ptrace.h>
#include <asm/irq_regs.h>
@@ -310,6 +311,21 @@ static struct sysrq_key_op sysrq_unrt_op
.enable_mask = SYSRQ_ENABLE_RTNICE,
};
+
+#ifdef CONFIG_VSERVER_DEBUG
+static void sysrq_handle_vxinfo(int key, struct tty_struct *tty)
+{
+ dump_vx_info_inactive((key == 'x')?0:1);
+}
+
+static struct sysrq_key_op sysrq_showvxinfo_op = {
+ .handler = sysrq_handle_vxinfo,
+ .help_msg = "conteXt",
+ .action_msg = "Show Context Info",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+#endif
+
/* Key Operations table and lock */
static DEFINE_SPINLOCK(sysrq_key_table_lock);
@@ -358,7 +374,11 @@ static struct sysrq_key_op *sysrq_key_ta
/* x: May be registered on ppc/powerpc for xmon */
NULL, /* x */
NULL, /* y */
- NULL /* z */
+#ifdef CONFIG_VSERVER_DEBUG
+ &sysrq_showvxinfo_op, /* z */
+#else
+ NULL, /* z */
+#endif
};
/* key2index calculation, -1 on invalid index */
@@ -370,6 +390,8 @@ static int sysrq_key_table_key2index(int
retval = key - '0';
else if ((key >= 'a') && (key <= 'z'))
retval = key + 10 - 'a';
+ else if ((key >= 'A') && (key <= 'Z'))
+ retval = key + 10 - 'A';
else
retval = -1;
return retval;
--- a/drivers/char/tty_io.c 2008-04-17 12:05:32.000000000 -0400
+++ a/drivers/char/tty_io.c 2008-04-19 15:14:52.000000000 -0400
@@ -105,6 +105,7 @@
#include <linux/kmod.h>
#include <linux/nsproxy.h>
+#include <linux/vs_pid.h>
#undef TTY_DEBUG_HANGUP
@@ -3142,6 +3143,7 @@ static int tiocspgrp(struct tty_struct *
return -ENOTTY;
if (get_user(pgrp_nr, p))
return -EFAULT;
+ pgrp_nr = vx_rmap_pid(pgrp_nr);
if (pgrp_nr < 0)
return -EINVAL;
rcu_read_lock();
--- a/drivers/infiniband/hw/ipath/ipath_user_pages.c 2008-04-17 10:37:17.000000000 -0400
+++ a/drivers/infiniband/hw/ipath/ipath_user_pages.c 2008-04-19 15:14:52.000000000 -0400
@@ -33,6 +33,7 @@
#include <linux/mm.h>
#include <linux/device.h>
+#include <linux/vs_memory.h>
#include "ipath_kernel.h"
@@ -61,7 +62,8 @@ static int __get_user_pages(unsigned lon
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >>
PAGE_SHIFT;
- if (num_pages > lock_limit) {
+ if (num_pages > lock_limit ||
+ !vx_vmlocked_avail(current->mm, num_pages)) {
ret = -ENOMEM;
goto bail;
}
@@ -78,7 +80,7 @@ static int __get_user_pages(unsigned lon
goto bail_release;
}
- current->mm->locked_vm += num_pages;
+ vx_vmlocked_add(current->mm, num_pages);
ret = 0;
goto bail;
@@ -177,7 +179,7 @@ void ipath_release_user_pages(struct pag
__ipath_release_user_pages(p, num_pages, 1);
- current->mm->locked_vm -= num_pages;
+ vx_vmlocked_sub(current->mm, num_pages);
up_write(&current->mm->mmap_sem);
}
@@ -194,7 +196,7 @@ static void user_pages_account(struct wo
container_of(_work, struct ipath_user_pages_work, work);
down_write(&work->mm->mmap_sem);
- work->mm->locked_vm -= work->num_pages;
+ vx_vmlocked_sub(work->mm, work->num_pages);
up_write(&work->mm->mmap_sem);
mmput(work->mm);
kfree(work);
--- a/drivers/md/dm.c 2008-04-17 12:05:33.000000000 -0400
+++ a/drivers/md/dm.c 2008-04-19 15:14:52.000000000 -0400
@@ -22,6 +22,7 @@
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <linux/smp_lock.h>
+#include <linux/vs_base.h>
#define DM_MSG_PREFIX "core"
@@ -91,6 +92,7 @@ struct mapped_device {
rwlock_t map_lock;
atomic_t holders;
atomic_t open_count;
+ xid_t xid;
unsigned long flags;
@@ -250,6 +252,7 @@ static void __exit dm_exit(void)
static int dm_blk_open(struct inode *inode, struct file *file)
{
struct mapped_device *md;
+ int ret = -ENXIO;
spin_lock(&_minor_lock);
@@ -258,18 +261,19 @@ static int dm_blk_open(struct inode *ino
goto out;
if (test_bit(DMF_FREEING, &md->flags) ||
- test_bit(DMF_DELETING, &md->flags)) {
- md = NULL;
+ test_bit(DMF_DELETING, &md->flags))
+ goto out;
+
+ ret = -EACCES;
+ if (!vx_check(md->xid, VS_IDENT|VS_HOSTID))
goto out;
- }
dm_get(md);
atomic_inc(&md->open_count);
-
+ ret = 0;
out:
spin_unlock(&_minor_lock);
-
- return md ? 0 : -ENXIO;
+ return ret;
}
static int dm_blk_close(struct inode *inode, struct file *file)
@@ -465,6 +469,14 @@ int dm_set_geometry(struct mapped_device
return 0;
}
+/*
+ * Get the xid associated with a dm device
+ */
+xid_t dm_get_xid(struct mapped_device *md)
+{
+ return md->xid;
+}
+
/*-----------------------------------------------------------------
* CRUD START:
* A more elegant soln is in the works that uses the queue
@@ -1022,6 +1034,7 @@ static struct mapped_device *alloc_dev(i
INIT_LIST_HEAD(&md->uevent_list);
spin_lock_init(&md->uevent_lock);
+ md->xid = vx_current_xid();
md->queue = blk_alloc_queue(GFP_KERNEL);
if (!md->queue)
goto bad_queue;
--- a/drivers/md/dm.h 2008-04-17 11:31:28.000000000 -0400
+++ a/drivers/md/dm.h 2008-04-19 15:14:52.000000000 -0400
@@ -127,6 +127,8 @@ void dm_put_target_type(struct target_ty
int dm_target_iterate(void (*iter_func)(struct target_type *tt,
void *param), void *param);
+xid_t dm_get_xid(struct mapped_device *md);
+
/*-----------------------------------------------------------------
* Useful inlines.
*---------------------------------------------------------------*/
--- a/drivers/md/dm-ioctl.c 2008-04-17 12:05:33.000000000 -0400
+++ a/drivers/md/dm-ioctl.c 2008-04-21 10:45:53.000000000 -0400
@@ -16,6 +16,7 @@
#include <linux/dm-ioctl.h>
#include <linux/hdreg.h>
#include <linux/compat.h>
+#include <linux/vs_context.h>
#include <asm/uaccess.h>
@@ -101,7 +102,8 @@ static struct hash_cell *__get_name_cell
unsigned int h = hash_str(str);
list_for_each_entry (hc, _name_buckets + h, name_list)
- if (!strcmp(hc->name, str)) {
+ if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+ !strcmp(hc->name, str)) {
dm_get(hc->md);
return hc;
}
@@ -115,7 +117,8 @@ static struct hash_cell *__get_uuid_cell
unsigned int h = hash_str(str);
list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
- if (!strcmp(hc->uuid, str)) {
+ if (vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT) &&
+ !strcmp(hc->uuid, str)) {
dm_get(hc->md);
return hc;
}
@@ -352,6 +355,9 @@ typedef int (*ioctl_fn)(struct dm_ioctl
static int remove_all(struct dm_ioctl *param, size_t param_size)
{
+ if (!vx_check(0, VS_ADMIN))
+ return -EPERM;
+
dm_hash_remove_all(1);
param->data_size = 0;
return 0;
@@ -399,6 +405,8 @@ static int list_devices(struct dm_ioctl
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
+ if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+ continue;
needed += sizeof(struct dm_name_list);
needed += strlen(hc->name) + 1;
needed += ALIGN_MASK;
@@ -422,6 +430,8 @@ static int list_devices(struct dm_ioctl
*/
for (i = 0; i < NUM_BUCKETS; i++) {
list_for_each_entry (hc, _name_buckets + i, name_list) {
+ if (!vx_check(dm_get_xid(hc->md), VS_WATCH_P | VS_IDENT))
+ continue;
if (old_nl)
old_nl->next = (uint32_t) ((void *) nl -
(void *) old_nl);
@@ -612,10 +622,11 @@ static struct hash_cell *__find_device_h
if (!md)
goto out;
- mdptr = dm_get_mdptr(md);
+ if (vx_check(dm_get_xid(md), VS_WATCH_P | VS_IDENT))
+ mdptr = dm_get_mdptr(md);
+
if (!mdptr)
dm_put(md);
-
out:
return mdptr;
}
@@ -1406,8 +1417,8 @@ static int ctl_ioctl(uint command, struc
ioctl_fn fn = NULL;
size_t param_size;
- /* only root can play with this */
- if (!capable(CAP_SYS_ADMIN))
+ /* only root and certain contexts can play with this */
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_ADMIN_MAPPER))
return -EACCES;
if (_IOC_TYPE(command) != DM_IOCTL)
--- a/drivers/net/tun.c 2008-04-17 12:05:36.000000000 -0400
+++ a/drivers/net/tun.c 2008-04-19 15:39:04.000000000 -0400
@@ -62,6 +62,7 @@
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/crc32.h>
+#include <linux/vs_network.h>
#include <net/net_namespace.h>
#include <asm/system.h>
@@ -86,6 +87,7 @@ struct tun_struct {
int attached;
uid_t owner;
gid_t group;
+ nid_t nid;
wait_queue_head_t read_wait;
struct sk_buff_head readq;
@@ -465,6 +467,7 @@ static void tun_setup(struct net_device
tun->owner = -1;
tun->group = -1;
+ tun->nid = current->nid;
dev->open = tun_net_open;
dev->hard_start_xmit = tun_net_xmit;
@@ -494,6 +497,9 @@ static int tun_set_iff(struct file *file
tun = tun_get_by_name(ifr->ifr_name);
if (tun) {
+ if (!nx_check(tun->nid, VS_IDENT | VS_HOSTID | VS_ADMIN_P))
+ return -EPERM;
+
if (tun->attached)
return -EBUSY;
@@ -502,7 +508,7 @@ static int tun_set_iff(struct file *file
current->euid != tun->owner) ||
(tun->group != -1 &&
current->egid != tun->group)) &&
- !capable(CAP_NET_ADMIN))
+ !cap_raised(current->cap_effective, CAP_NET_ADMIN))
return -EPERM;
}
else if (__dev_get_by_name(&init_net, ifr->ifr_name))
@@ -513,7 +519,7 @@ static int tun_set_iff(struct file *file
err = -EINVAL;
- if (!capable(CAP_NET_ADMIN))
+ if (!nx_capable(CAP_NET_ADMIN, NXC_TUN_CREATE))
return -EPERM;
/* Set dev type */
@@ -656,6 +662,16 @@ static int tun_chr_ioctl(struct inode *i
DBG(KERN_INFO "%s: group set to %d\n", tun->dev->name, tun->group);
break;
+ case TUNSETNID:
+ if (!capable(CAP_CONTEXT))
+ return -EPERM;
+
+ /* Set nid owner of the device */
+ tun->nid = (nid_t) arg;
+
+ DBG(KERN_INFO "%s: nid owner set to %u\n", tun->dev->name, tun->nid);
+ break;
+
case TUNSETLINK:
/* Only allow setting the type when the interface is down */
if (tun->dev->flags & IFF_UP) {
--- a/fs/attr.c 2008-04-17 11:31:35.000000000 -0400
+++ a/fs/attr.c 2008-04-19 15:14:52.000000000 -0400
@@ -14,6 +14,9 @@
#include <linux/fcntl.h>
#include <linux/quotaops.h>
#include <linux/security.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/vs_base.h>
/* Taken over from the old code... */
@@ -55,6 +58,27 @@ int inode_change_ok(struct inode *inode,
if (!is_owner_or_cap(inode))
goto error;
}
+
+ /* Check for evil vserver activity */
+ if (vx_check(0, VS_ADMIN))
+ goto fine;
+
+ if (IS_BARRIER(inode)) {
+ vxwprintk_task(1, "messing with the barrier.");
+ goto error;
+ }
+ switch (inode->i_sb->s_magic) {
+ case PROC_SUPER_MAGIC:
+ /* maybe allow that in the future? */
+ vxwprintk_task(1, "messing with the procfs.");
+ goto error;
+ case DEVPTS_SUPER_MAGIC:
+ /* devpts is xid tagged */
+ if (vx_check((xid_t)inode->i_tag, VS_IDENT))
+ goto fine;
+ vxwprintk_task(1, "messing with the devpts.");
+ goto error;
+ }
fine:
retval = 0;
error:
@@ -78,6 +102,8 @@ int inode_setattr(struct inode * inode,
inode->i_uid = attr->ia_uid;
if (ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
+ if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+ inode->i_tag = attr->ia_tag;
if (ia_valid & ATTR_ATIME)
inode->i_atime = timespec_trunc(attr->ia_atime,
inode->i_sb->s_time_gran);
@@ -167,7 +193,8 @@ int notify_change(struct dentry * dentry
error = security_inode_setattr(dentry, attr);
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag))
error = DQUOT_TRANSFER(inode, attr) ? -EDQUOT : 0;
if (!error)
error = inode_setattr(inode, attr);
--- a/fs/binfmt_aout.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/binfmt_aout.c 2008-04-19 15:14:52.000000000 -0400
@@ -24,6 +24,7 @@
#include <linux/binfmts.h>
#include <linux/personality.h>
#include <linux/init.h>
+#include <linux/vs_memory.h>
#include <asm/system.h>
#include <asm/uaccess.h>
--- a/fs/binfmt_elf.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/binfmt_elf.c 2008-04-19 15:14:52.000000000 -0400
@@ -39,6 +39,7 @@
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/utsname.h>
+#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
--- a/fs/binfmt_flat.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/binfmt_flat.c 2008-04-19 15:14:52.000000000 -0400
@@ -35,6 +35,7 @@
#include <linux/init.h>
#include <linux/flat.h>
#include <linux/syscalls.h>
+#include <linux/vs_memory.h>
#include <asm/byteorder.h>
#include <asm/system.h>
--- a/fs/binfmt_som.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/binfmt_som.c 2008-04-19 15:14:52.000000000 -0400
@@ -28,6 +28,7 @@
#include <linux/shm.h>
#include <linux/personality.h>
#include <linux/init.h>
+#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
--- a/fs/block_dev.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/block_dev.c 2008-04-19 15:14:52.000000000 -0400
@@ -23,6 +23,7 @@
#include <linux/uio.h>
#include <linux/namei.h>
#include <linux/log2.h>
+#include <linux/vs_device.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -388,6 +389,7 @@ struct block_device *bdget(dev_t dev)
bdev->bd_invalidated = 0;
inode->i_mode = S_IFBLK;
inode->i_rdev = dev;
+ inode->i_mdev = dev;
inode->i_bdev = bdev;
inode->i_data.a_ops = &def_blk_aops;
mapping_set_gfp_mask(&inode->i_data, GFP_USER);
@@ -424,6 +426,11 @@ EXPORT_SYMBOL(bdput);
static struct block_device *bd_acquire(struct inode *inode)
{
struct block_device *bdev;
+ dev_t mdev;
+
+ if (!vs_map_blkdev(inode->i_rdev, &mdev, DATTR_OPEN))
+ return NULL;
+ inode->i_mdev = mdev;
spin_lock(&bdev_lock);
bdev = inode->i_bdev;
@@ -434,7 +441,7 @@ static struct block_device *bd_acquire(s
}
spin_unlock(&bdev_lock);
- bdev = bdget(inode->i_rdev);
+ bdev = bdget(mdev);
if (bdev) {
spin_lock(&bdev_lock);
if (!inode->i_bdev) {
--- a/fs/char_dev.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/char_dev.c 2008-04-29 18:44:50.000000000 -0400
@@ -21,6 +21,8 @@
#include <linux/cdev.h>
#include <linux/mutex.h>
#include <linux/backing-dev.h>
+#include <linux/vs_context.h>
+#include <linux/vs_device.h>
#ifdef CONFIG_KMOD
#include <linux/kmod.h>
@@ -362,14 +364,21 @@ static int chrdev_open(struct inode *ino
struct cdev *p;
struct cdev *new = NULL;
int ret = 0;
+ dev_t mdev;
+
+ if (!vs_map_chrdev(inode->i_rdev, &mdev, DATTR_OPEN))
+ return -EPERM;
+ inode->i_mdev = mdev;
spin_lock(&cdev_lock);
p = inode->i_cdev;
if (!p) {
struct kobject *kobj;
int idx;
+
spin_unlock(&cdev_lock);
- kobj = kobj_lookup(cdev_map, inode->i_rdev, &idx);
+
+ kobj = kobj_lookup(cdev_map, mdev, &idx);
if (!kobj)
return -ENXIO;
new = container_of(kobj, struct cdev, kobj);
--- a/fs/dcache.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/dcache.c 2008-04-19 17:06:15.000000000 -0400
@@ -31,6 +31,7 @@
#include <linux/seqlock.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
+#include <linux/vs_limit.h>
#include "internal.h"
@@ -184,6 +185,7 @@ void dput(struct dentry *dentry)
if (!dentry)
return;
+ vx_dentry_dec(dentry);
repeat:
if (atomic_read(&dentry->d_count) == 1)
might_sleep();
@@ -197,6 +199,8 @@ repeat:
return;
}
+ vx_dentry_dec(dentry);
+
/*
* AV: ->d_delete() is _NOT_ allowed to block now.
*/
@@ -288,6 +292,7 @@ static inline struct dentry * __dget_loc
{
atomic_inc(&dentry->d_count);
dentry_lru_remove(dentry);
+ vx_dentry_inc(dentry);
return dentry;
}
@@ -885,6 +890,9 @@ struct dentry *d_alloc(struct dentry * p
struct dentry *dentry;
char *dname;
+ if (!vx_dentry_avail(1))
+ return NULL;
+
dentry = kmem_cache_alloc(dentry_cache, GFP_KERNEL);
if (!dentry)
return NULL;
@@ -933,6 +941,7 @@ struct dentry *d_alloc(struct dentry * p
if (parent)
list_add(&dentry->d_u.d_child, &parent->d_subdirs);
dentry_stat.nr_dentry++;
+ vx_dentry_inc(dentry);
spin_unlock(&dcache_lock);
return dentry;
@@ -1282,6 +1291,7 @@ struct dentry * __d_lookup(struct dentry
if (!d_unhashed(dentry)) {
atomic_inc(&dentry->d_count);
+ vx_dentry_inc(dentry);
found = dentry;
}
spin_unlock(&dentry->d_lock);
--- a/fs/devpts/inode.c 2008-04-17 12:05:39.000000000 -0400
+++ a/fs/devpts/inode.c 2008-04-21 09:23:34.000000000 -0400
@@ -17,15 +17,30 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/tty.h>
+#include <linux/magic.h>
#include <linux/devpts_fs.h>
#include <linux/parser.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>
+#include <linux/vs_base.h>
-#define DEVPTS_SUPER_MAGIC 0x1cd1
#define DEVPTS_DEFAULT_MODE 0600
+static int devpts_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ int ret = -EACCES;
+
+ /* devpts is xid tagged */
+ if (vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT))
+ ret = generic_permission(inode, mask, NULL);
+ return ret;
+}
+
+static struct inode_operations devpts_file_inode_operations = {
+ .permission = devpts_permission,
+};
+
static struct vfsmount *devpts_mnt;
static struct dentry *devpts_root;
@@ -106,6 +121,25 @@ static int devpts_show_options(struct se
return 0;
}
+static int devpts_filter(struct dentry *de)
+{
+ /* devpts is xid tagged */
+ return vx_check((xid_t)de->d_inode->i_tag, VS_WATCH_P | VS_IDENT);
+}
+
+static int devpts_readdir(struct file * filp, void * dirent, filldir_t filldir)
+{
+ return dcache_readdir_filter(filp, dirent, filldir, devpts_filter);
+}
+
+static struct file_operations devpts_dir_operations = {
+ .open = dcache_dir_open,
+ .release = dcache_dir_close,
+ .llseek = dcache_dir_lseek,
+ .read = generic_read_dir,
+ .readdir = devpts_readdir,
+};
+
static const struct super_operations devpts_sops = {
.statfs = simple_statfs,
.remount_fs = devpts_remount,
@@ -132,8 +166,10 @@ devpts_fill_super(struct super_block *s,
inode->i_uid = inode->i_gid = 0;
inode->i_mode = S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR;
inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
+ inode->i_fop = &devpts_dir_operations;
inode->i_nlink = 2;
+ /* devpts is xid tagged */
+ inode->i_tag = (tag_t)vx_current_xid();
devpts_root = s->s_root = d_alloc_root(inode);
if (s->s_root)
@@ -191,6 +227,9 @@ int devpts_pty_new(struct tty_struct *tt
inode->i_gid = config.setgid ? config.gid : current->fsgid;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
init_special_inode(inode, S_IFCHR|config.mode, device);
+ /* devpts is xid tagged */
+ inode->i_tag = (tag_t)vx_current_xid();
+ inode->i_op = &devpts_file_inode_operations;
inode->i_private = tty;
dentry = get_node(number);
--- a/fs/ecryptfs/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ecryptfs/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -400,7 +400,7 @@ static int ecryptfs_link(struct dentry *
dget(lower_new_dentry);
lower_dir_dentry = lock_parent(lower_new_dentry);
rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
- lower_new_dentry);
+ lower_new_dentry, NULL);
if (rc || !lower_new_dentry->d_inode)
goto out_lock;
rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
@@ -428,7 +428,7 @@ static int ecryptfs_unlink(struct inode
struct dentry *lower_dir_dentry;
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_unlink(lower_dir_inode, lower_dentry);
+ rc = vfs_unlink(lower_dir_inode, lower_dentry, NULL);
if (rc) {
printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
goto out_unlock;
@@ -466,7 +466,7 @@ static int ecryptfs_symlink(struct inode
goto out_lock;
}
rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry,
- encoded_symname, mode);
+ encoded_symname, mode, NULL);
kfree(encoded_symname);
if (rc || !lower_dentry->d_inode)
goto out_lock;
@@ -491,7 +491,7 @@ static int ecryptfs_mkdir(struct inode *
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
+ rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode, NULL);
if (rc || !lower_dentry->d_inode)
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
@@ -517,7 +517,7 @@ static int ecryptfs_rmdir(struct inode *
dget(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
dget(lower_dentry);
- rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
+ rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry, NULL);
dput(lower_dentry);
if (!rc)
d_delete(lower_dentry);
@@ -539,7 +539,7 @@ ecryptfs_mknod(struct inode *dir, struct
lower_dentry = ecryptfs_dentry_to_lower(dentry);
lower_dir_dentry = lock_parent(lower_dentry);
- rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
+ rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev, NULL);
if (rc || !lower_dentry->d_inode)
goto out;
rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
--- a/fs/exec.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/exec.c 2008-04-19 15:14:52.000000000 -0400
@@ -249,7 +249,9 @@ static int __bprm_mm_init(struct linux_b
goto err;
}
- mm->stack_vm = mm->total_vm = 1;
+ mm->total_vm = 0;
+ vx_vmpages_inc(mm);
+ mm->stack_vm = 1;
up_write(&mm->mmap_sem);
bprm->p = vma->vm_end - sizeof(void *);
@@ -1452,7 +1454,7 @@ static int format_corename(char *corenam
/* UNIX time of coredump */
case 't': {
struct timeval tv;
- do_gettimeofday(&tv);
+ vx_gettimeofday(&tv);
rc = snprintf(out_ptr, out_end - out_ptr,
"%lu", tv.tv_sec);
if (rc > out_end - out_ptr)
--- a/fs/ext2/balloc.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/balloc.c 2008-04-19 15:14:52.000000000 -0400
@@ -16,6 +16,8 @@
#include <linux/sched.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
@@ -569,6 +571,7 @@ do_more:
}
error_return:
brelse(bitmap_bh);
+ DLIMIT_FREE_BLOCK(inode, freed);
release_blocks(sb, freed);
DQUOT_FREE_BLOCK(inode, freed);
}
@@ -701,7 +704,6 @@ ext2_try_to_allocate(struct super_block
start = 0;
end = EXT2_BLOCKS_PER_GROUP(sb);
}
-
BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));
repeat:
@@ -1251,6 +1253,11 @@ ext2_fsblk_t ext2_new_blocks(struct inod
*errp = -EDQUOT;
return 0;
}
+ if (DLIMIT_ALLOC_BLOCK(inode, num)) {
+ *errp = -ENOSPC;
+ DQUOT_FREE_BLOCK(inode, num);
+ return 0;
+ }
sbi = EXT2_SB(sb);
es = EXT2_SB(sb)->s_es;
@@ -1403,6 +1410,7 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
+ DLIMIT_FREE_BLOCK(inode, *count-num);
DQUOT_FREE_BLOCK(inode, *count-num);
*count = num;
return ret_block;
@@ -1413,8 +1421,10 @@ out:
/*
* Undo the block allocation
*/
- if (!performed_allocation)
+ if (!performed_allocation) {
+ DLIMIT_FREE_BLOCK(inode, *count);
DQUOT_FREE_BLOCK(inode, *count);
+ }
brelse(bitmap_bh);
return 0;
}
--- a/fs/ext2/ext2.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/ext2.h 2008-04-19 15:14:52.000000000 -0400
@@ -168,6 +168,7 @@ extern const struct file_operations ext2
extern const struct address_space_operations ext2_aops;
extern const struct address_space_operations ext2_aops_xip;
extern const struct address_space_operations ext2_nobh_aops;
+extern int ext2_sync_flags(struct inode *inode);
/* namei.c */
extern const struct inode_operations ext2_dir_inode_operations;
--- a/fs/ext2/file.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -86,4 +86,5 @@ const struct inode_operations ext2_file_
#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
+ .sync_flags = ext2_sync_flags,
};
--- a/fs/ext2/ialloc.c 2008-04-17 11:31:35.000000000 -0400
+++ a/fs/ext2/ialloc.c 2008-04-19 15:14:52.000000000 -0400
@@ -17,6 +17,8 @@
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include <linux/random.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -125,6 +127,7 @@ void ext2_free_inode (struct inode * ino
ext2_xattr_delete_inode(inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
}
es = EXT2_SB(sb)->s_es;
@@ -456,6 +459,11 @@ struct inode *ext2_new_inode(struct inod
if (!inode)
return ERR_PTR(-ENOMEM);
+ inode->i_tag = dx_current_fstag(sb);
+ if (DLIMIT_ALLOC_INODE(inode)) {
+ err = -ENOSPC;
+ goto fail_dlim;
+ }
ei = EXT2_I(inode);
sbi = EXT2_SB(sb);
es = sbi->s_es;
@@ -569,7 +577,8 @@ got:
inode->i_blocks = 0;
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC;
memset(ei->i_data, 0, sizeof(ei->i_data));
- ei->i_flags = EXT2_I(dir)->i_flags & ~EXT2_BTREE_FL;
+ ei->i_flags = EXT2_I(dir)->i_flags &
+ ~(EXT2_BTREE_FL|EXT2_IUNLINK_FL|EXT2_BARRIER_FL);
if (S_ISLNK(mode))
ei->i_flags &= ~(EXT2_IMMUTABLE_FL|EXT2_APPEND_FL);
/* dirsync is only applied to directories */
@@ -614,12 +623,15 @@ fail_free_drop:
fail_drop:
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
return ERR_PTR(err);
fail:
+ DLIMIT_FREE_INODE(inode);
+fail_dlim:
make_bad_inode(inode);
iput(inode);
return ERR_PTR(err);
--- a/fs/ext2/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/inode.c 2008-04-21 10:14:57.000000000 -0400
@@ -31,6 +31,7 @@
#include <linux/writeback.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
+#include <linux/vs_tag.h>
#include "ext2.h"
#include "acl.h"
#include "xip.h"
@@ -1011,7 +1012,7 @@ void ext2_truncate(struct inode *inode)
return;
if (ext2_inode_is_fast_symlink(inode))
return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
return;
blocksize = inode->i_sb->s_blocksize;
@@ -1149,13 +1150,20 @@ void ext2_set_inode_flags(struct inode *
{
unsigned int flags = EXT2_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_IMMUTABLE | S_IUNLINK | S_BARRIER |
+ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+ if (flags & EXT2_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT2_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ if (flags & EXT2_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
+
if (flags & EXT2_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT2_APPEND_FL)
inode->i_flags |= S_APPEND;
- if (flags & EXT2_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
if (flags & EXT2_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & EXT2_DIRSYNC_FL)
@@ -1181,6 +1189,30 @@ void ext2_get_inode_flags(struct ext2_in
ei->i_flags |= EXT2_DIRSYNC_FL;
}
+int ext2_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+
+ oldflags = EXT2_I(inode)->i_flags;
+ newflags = oldflags & ~(EXT2_IMMUTABLE_FL |
+ EXT2_IUNLINK_FL | EXT2_BARRIER_FL);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= EXT2_IMMUTABLE_FL;
+ if (IS_IUNLINK(inode))
+ newflags |= EXT2_IUNLINK_FL;
+ if (IS_BARRIER(inode))
+ newflags |= EXT2_BARRIER_FL;
+
+ if (oldflags ^ newflags) {
+ EXT2_I(inode)->i_flags = newflags;
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ }
+ return 0;
+}
+
+
struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
{
struct ext2_inode_info *ei;
@@ -1188,6 +1220,8 @@ struct inode *ext2_iget (struct super_bl
struct ext2_inode *raw_inode;
struct inode *inode;
long ret = -EIO;
+ uid_t uid;
+ gid_t gid;
int n;
inode = iget_locked(sb, ino);
@@ -1210,12 +1244,17 @@ struct inode *ext2_iget (struct super_bl
}
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
- inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
- inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+ gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if (!(test_opt (inode->i_sb, NO_UID32))) {
- inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
- inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+ le16_to_cpu(raw_inode->i_raw_tag));
+
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -1311,8 +1350,8 @@ static int ext2_update_inode(struct inod
struct ext2_inode_info *ei = EXT2_I(inode);
struct super_block *sb = inode->i_sb;
ino_t ino = inode->i_ino;
- uid_t uid = inode->i_uid;
- gid_t gid = inode->i_gid;
+ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
struct buffer_head * bh;
struct ext2_inode * raw_inode = ext2_get_inode(sb, ino, &bh);
int n;
@@ -1348,6 +1387,9 @@ static int ext2_update_inode(struct inod
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
+#ifdef CONFIG_TAGGING_INTERN
+ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(inode->i_size);
raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
@@ -1434,7 +1476,8 @@ int ext2_setattr(struct dentry *dentry,
if (error)
return error;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
- (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+ (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
+ (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
error = DQUOT_TRANSFER(inode, iattr) ? -EDQUOT : 0;
if (error)
return error;
--- a/fs/ext2/ioctl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/sched.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
+#include <linux/mount.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -34,7 +35,8 @@ long ext2_ioctl(struct file *filp, unsig
case EXT2_IOC_SETFLAGS: {
unsigned int oldflags;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -60,7 +62,9 @@ long ext2_ioctl(struct file *filp, unsig
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
+ if ((oldflags & EXT2_IMMUTABLE_FL) ||
+ ((flags ^ oldflags) & (EXT2_APPEND_FL |
+ EXT2_IMMUTABLE_FL | EXT2_IUNLINK_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
return -EPERM;
@@ -82,7 +86,8 @@ long ext2_ioctl(struct file *filp, unsig
case EXT2_IOC_SETVERSION:
if (!is_owner_or_cap(inode))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(inode->i_generation, (int __user *) arg))
return -EFAULT;
--- a/fs/ext2/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/namei.c 2008-04-21 10:12:49.000000000 -0400
@@ -31,6 +31,7 @@
*/
#include <linux/pagemap.h>
+#include <linux/vs_tag.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -66,6 +67,7 @@ static struct dentry *ext2_lookup(struct
inode = ext2_iget(dir->i_sb, ino);
if (IS_ERR(inode))
return ERR_CAST(inode);
+ dx_propagate_tag(nd, inode);
}
return d_splice_alias(inode, dentry);
}
@@ -391,6 +393,7 @@ const struct inode_operations ext2_dir_i
#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
+ .sync_flags = ext2_sync_flags,
};
const struct inode_operations ext2_special_inode_operations = {
@@ -402,4 +405,5 @@ const struct inode_operations ext2_speci
#endif
.setattr = ext2_setattr,
.permission = ext2_permission,
+ .sync_flags = ext2_sync_flags,
};
--- a/fs/ext2/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/super.c 2008-04-19 15:14:52.000000000 -0400
@@ -390,7 +390,8 @@ enum {
Opt_err_ro, Opt_nouid32, Opt_nocheck, Opt_debug,
Opt_oldalloc, Opt_orlov, Opt_nobh, Opt_user_xattr, Opt_nouser_xattr,
Opt_acl, Opt_noacl, Opt_xip, Opt_ignore, Opt_err, Opt_quota,
- Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation
+ Opt_usrquota, Opt_grpquota, Opt_reservation, Opt_noreservation,
+ Opt_tag, Opt_notag, Opt_tagid
};
static match_table_t tokens = {
@@ -418,6 +419,9 @@ static match_table_t tokens = {
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_xip, "xip"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
{Opt_grpquota, "grpquota"},
{Opt_ignore, "noquota"},
{Opt_quota, "quota"},
@@ -488,6 +492,20 @@ static int parse_options (char * options
case Opt_nouid32:
set_opt (sbi->s_mount_opt, NO_UID32);
break;
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+ case Opt_notag:
+ clear_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
case Opt_nocheck:
clear_opt (sbi->s_mount_opt, CHECK);
break;
@@ -831,6 +849,8 @@ static int ext2_fill_super(struct super_
if (!parse_options ((char *) data, sbi))
goto failed_mount;
+ if (EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED;
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((EXT2_SB(sb)->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ?
MS_POSIXACL : 0);
@@ -1164,6 +1184,13 @@ static int ext2_remount (struct super_bl
goto restore_opts;
}
+ if ((sbi->s_mount_opt & EXT2_MOUNT_TAGGED) &&
+ !(sb->s_flags & MS_TAGGED)) {
+ printk("EXT2-fs: %s: tagging not permitted on remount.\n",
+ sb->s_id);
+ return -EINVAL;
+ }
+
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT2_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
--- a/fs/ext2/symlink.c 2008-04-17 10:32:27.000000000 -0400
+++ a/fs/ext2/symlink.c 2008-04-19 15:14:52.000000000 -0400
@@ -38,6 +38,7 @@ const struct inode_operations ext2_symli
.listxattr = ext2_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext2_sync_flags,
};
const struct inode_operations ext2_fast_symlink_inode_operations = {
@@ -49,4 +50,5 @@ const struct inode_operations ext2_fast_
.listxattr = ext2_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext2_sync_flags,
};
--- a/fs/ext2/xattr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext2/xattr.c 2008-04-19 15:14:52.000000000 -0400
@@ -60,6 +60,7 @@
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
+#include <linux/vs_dlimit.h>
#include "ext2.h"
#include "xattr.h"
#include "acl.h"
@@ -641,8 +642,12 @@ ext2_xattr_set2(struct inode *inode, str
the inode. */
ea_bdebug(new_bh, "reusing block");
+ error = -ENOSPC;
+ if (DLIMIT_ALLOC_BLOCK(inode, 1))
+ goto cleanup;
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1)) {
+ DLIMIT_FREE_BLOCK(inode, 1);
unlock_buffer(new_bh);
goto cleanup;
}
@@ -735,6 +740,7 @@ ext2_xattr_set2(struct inode *inode, str
le32_to_cpu(HDR(old_bh)->h_refcount) - 1);
if (ce)
mb_cache_entry_release(ce);
+ DLIMIT_FREE_BLOCK(inode, 1);
DQUOT_FREE_BLOCK(inode, 1);
mark_buffer_dirty(old_bh);
ea_bdebug(old_bh, "refcount now=%d",
@@ -799,6 +805,7 @@ ext2_xattr_delete_inode(struct inode *in
mark_buffer_dirty(bh);
if (IS_SYNC(inode))
sync_dirty_buffer(bh);
+ DLIMIT_FREE_BLOCK(inode, 1);
DQUOT_FREE_BLOCK(inode, 1);
}
EXT2_I(inode)->i_file_acl = 0;
--- a/fs/ext3/balloc.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/balloc.c 2008-04-19 15:14:52.000000000 -0400
@@ -19,6 +19,8 @@
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
/*
* balloc.c contains the blocks allocation and deallocation routines
@@ -675,8 +677,10 @@ void ext3_free_blocks(handle_t *handle,
return;
}
ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
- if (dquot_freed_blocks)
+ if (dquot_freed_blocks) {
+ DLIMIT_FREE_BLOCK(inode, dquot_freed_blocks);
DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ }
return;
}
@@ -1415,18 +1419,33 @@ out:
*
* Check if filesystem has at least 1 free block available for allocation.
*/
-static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
+static int ext3_has_free_blocks(struct super_block *sb)
{
- ext3_fsblk_t free_blocks, root_blocks;
+ struct ext3_sb_info *sbi = EXT3_SB(sb);
+ unsigned long long free_blocks, root_blocks;
+ int cond;
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "ext3_has_free_blocks(%p): free=%llu, root=%llu",
+ sb, free_blocks, root_blocks);
+
+ DLIMIT_ADJUST_BLOCK(sb, dx_current_tag(), &free_blocks, &root_blocks);
+
+ cond = (free_blocks < root_blocks + 1 &&
+ !capable(CAP_SYS_RESOURCE) &&
sbi->s_resuid != current->fsuid &&
- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- return 0;
- }
- return 1;
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));
+
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "ext3_has_free_blocks(%p): %llu<%llu+1, %c, %u!=%u r=%d",
+ sb, free_blocks, root_blocks,
+ !capable(CAP_SYS_RESOURCE)?'1':'0',
+ sbi->s_resuid, current->fsuid, cond?0:1);
+
+ return (cond ? 0 : 1);
}
/**
@@ -1443,7 +1462,7 @@ static int ext3_has_free_blocks(struct e
*/
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
- if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
+ if (!ext3_has_free_blocks(sb) || (*retries)++ > 3)
return 0;
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
@@ -1506,6 +1525,8 @@ ext3_fsblk_t ext3_new_blocks(handle_t *h
*errp = -EDQUOT;
return 0;
}
+ if (DLIMIT_ALLOC_BLOCK(inode, num))
+ goto out_dlimit;
sbi = EXT3_SB(sb);
es = EXT3_SB(sb)->s_es;
@@ -1522,7 +1543,7 @@ ext3_fsblk_t ext3_new_blocks(handle_t *h
if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
my_rsv = &block_i->rsv_window_node;
- if (!ext3_has_free_blocks(sbi)) {
+ if (!ext3_has_free_blocks(sb)) {
*errp = -ENOSPC;
goto out;
}
@@ -1710,12 +1731,16 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
DQUOT_FREE_BLOCK(inode, *count-num);
+ DLIMIT_FREE_BLOCK(inode, *count-num);
*count = num;
return ret_block;
io_error:
*errp = -EIO;
out:
+ if (!performed_allocation)
+ DLIMIT_FREE_BLOCK(inode, *count);
+out_dlimit:
if (fatal) {
*errp = fatal;
ext3_std_error(sb, fatal);
--- a/fs/ext3/file.c 2008-04-17 10:37:23.000000000 -0400
+++ a/fs/ext3/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -134,5 +134,6 @@ const struct inode_operations ext3_file_
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
+ .sync_flags = ext3_sync_flags,
};
--- a/fs/ext3/ialloc.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/ialloc.c 2008-04-19 15:14:52.000000000 -0400
@@ -23,6 +23,8 @@
#include <linux/buffer_head.h>
#include <linux/random.h>
#include <linux/bitops.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
#include <asm/byteorder.h>
@@ -127,6 +129,7 @@ void ext3_free_inode (handle_t *handle,
ext3_xattr_delete_inode(handle, inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -440,6 +443,12 @@ struct inode *ext3_new_inode(handle_t *h
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
+
+ inode->i_tag = dx_current_fstag(sb);
+ if (DLIMIT_ALLOC_INODE(inode)) {
+ err = -ENOSPC;
+ goto out_dlimit;
+ }
ei = EXT3_I(inode);
sbi = EXT3_SB(sb);
@@ -559,7 +568,8 @@ got:
ei->i_dir_start_lookup = 0;
ei->i_disksize = 0;
- ei->i_flags = EXT3_I(dir)->i_flags & ~EXT3_INDEX_FL;
+ ei->i_flags = EXT3_I(dir)->i_flags &
+ ~(EXT3_INDEX_FL|EXT3_IUNLINK_FL|EXT3_BARRIER_FL);
if (S_ISLNK(mode))
ei->i_flags &= ~(EXT3_IMMUTABLE_FL|EXT3_APPEND_FL);
/* dirsync only applies to directories */
@@ -614,6 +624,8 @@ got:
fail:
ext3_std_error(sb, err);
out:
+ DLIMIT_FREE_INODE(inode);
+out_dlimit:
iput(inode);
ret = ERR_PTR(err);
really_out:
@@ -625,6 +637,7 @@ fail_free_drop:
fail_drop:
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
--- a/fs/ext3/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/inode.c 2008-04-20 13:25:49.000000000 -0400
@@ -36,6 +36,7 @@
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
+#include <linux/vs_tag.h>
#include "xattr.h"
#include "acl.h"
@@ -2300,7 +2301,7 @@ void ext3_truncate(struct inode *inode)
return;
if (ext3_inode_is_fast_symlink(inode))
return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
return;
/*
@@ -2622,13 +2623,20 @@ void ext3_set_inode_flags(struct inode *
{
unsigned int flags = EXT3_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_IMMUTABLE | S_IUNLINK | S_BARRIER |
+ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+ if (flags & EXT3_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT3_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ if (flags & EXT3_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
+
if (flags & EXT3_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT3_APPEND_FL)
inode->i_flags |= S_APPEND;
- if (flags & EXT3_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
if (flags & EXT3_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & EXT3_DIRSYNC_FL)
@@ -2654,6 +2662,45 @@ void ext3_get_inode_flags(struct ext3_in
ei->i_flags |= EXT3_DIRSYNC_FL;
}
+int ext3_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+ int err = 0;
+
+ oldflags = EXT3_I(inode)->i_flags;
+ newflags = oldflags & ~(EXT3_IMMUTABLE_FL |
+ EXT3_IUNLINK_FL | EXT3_BARRIER_FL);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= EXT3_IMMUTABLE_FL;
+ if (IS_IUNLINK(inode))
+ newflags |= EXT3_IUNLINK_FL;
+ if (IS_BARRIER(inode))
+ newflags |= EXT3_BARRIER_FL;
+
+ if (oldflags ^ newflags) {
+ handle_t *handle;
+ struct ext3_iloc iloc;
+
+ handle = ext3_journal_start(inode, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ err = ext3_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ goto flags_err;
+
+ EXT3_I(inode)->i_flags = newflags;
+ inode->i_ctime = CURRENT_TIME;
+
+ err = ext3_mark_iloc_dirty(handle, inode, &iloc);
+ flags_err:
+ ext3_journal_stop(handle);
+ }
+ return err;
+}
+
struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
{
struct ext3_iloc iloc;
@@ -2663,6 +2710,8 @@ struct inode *ext3_iget(struct super_blo
struct inode *inode;
long ret;
int block;
+ uid_t uid;
+ gid_t gid;
inode = iget_locked(sb, ino);
if (!inode)
@@ -2683,12 +2732,17 @@ struct inode *ext3_iget(struct super_blo
bh = iloc.bh;
raw_inode = ext3_raw_inode(&iloc);
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
- inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
- inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+ gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if(!(test_opt (inode->i_sb, NO_UID32))) {
- inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
- inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+ le16_to_cpu(raw_inode->i_raw_tag));
+
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
inode->i_size = le32_to_cpu(raw_inode->i_size);
inode->i_atime.tv_sec = (signed)le32_to_cpu(raw_inode->i_atime);
@@ -2817,6 +2871,8 @@ static int ext3_do_update_inode(handle_t
struct ext3_inode *raw_inode = ext3_raw_inode(iloc);
struct ext3_inode_info *ei = EXT3_I(inode);
struct buffer_head *bh = iloc->bh;
+ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
int err = 0, rc, block;
/* For fields not not tracking in the in-memory inode,
@@ -2827,29 +2883,32 @@ static int ext3_do_update_inode(handle_t
ext3_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if(!(test_opt(inode->i_sb, NO_UID32))) {
- raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
- raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
+ raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if(!ei->i_dtime) {
raw_inode->i_uid_high =
- cpu_to_le16(high_16_bits(inode->i_uid));
+ cpu_to_le16(high_16_bits(uid));
raw_inode->i_gid_high =
- cpu_to_le16(high_16_bits(inode->i_gid));
+ cpu_to_le16(high_16_bits(gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
- cpu_to_le16(fs_high2lowuid(inode->i_uid));
+ cpu_to_le16(fs_high2lowuid(uid));
raw_inode->i_gid_low =
- cpu_to_le16(fs_high2lowgid(inode->i_gid));
+ cpu_to_le16(fs_high2lowgid(gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
+#ifdef CONFIG_TAGGING_INTERN
+ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
raw_inode->i_size = cpu_to_le32(ei->i_disksize);
raw_inode->i_atime = cpu_to_le32(inode->i_atime.tv_sec);
@@ -3002,7 +3061,8 @@ int ext3_setattr(struct dentry *dentry,
return error;
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
@@ -3024,6 +3084,8 @@ int ext3_setattr(struct dentry *dentry,
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
+ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+ inode->i_tag = attr->ia_tag;
error = ext3_mark_inode_dirty(handle, inode);
ext3_journal_stop(handle);
}
--- a/fs/ext3/ioctl.c 2008-04-17 11:31:35.000000000 -0400
+++ a/fs/ext3/ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -8,6 +8,7 @@
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/jbd.h>
#include <linux/capability.h>
#include <linux/ext3_fs.h>
@@ -15,6 +16,7 @@
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
+#include <linux/vs_tag.h>
#include <asm/uaccess.h>
int ext3_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
@@ -38,7 +40,8 @@ int ext3_ioctl (struct inode * inode, st
unsigned int oldflags;
unsigned int jflag;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -67,7 +70,9 @@ int ext3_ioctl (struct inode * inode, st
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if ((flags ^ oldflags) & (EXT3_APPEND_FL | EXT3_IMMUTABLE_FL)) {
+ if ((oldflags & EXT3_IMMUTABLE_FL) ||
+ ((flags ^ oldflags) & (EXT3_APPEND_FL |
+ EXT3_IMMUTABLE_FL | EXT3_IUNLINK_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
return -EPERM;
@@ -129,7 +134,8 @@ flags_err:
if (!is_owner_or_cap(inode))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(generation, (int __user *) arg))
return -EFAULT;
@@ -183,7 +189,8 @@ flags_err:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -218,7 +225,8 @@ flags_err:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(n_blocks_count, (__u32 __user *)arg))
@@ -239,7 +247,8 @@ flags_err:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (copy_from_user(&input, (struct ext3_new_group_input __user *)arg,
--- a/fs/ext3/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/namei.c 2008-04-19 15:14:52.000000000 -0400
@@ -36,6 +36,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
+#include <linux/vs_tag.h>
#include "namei.h"
#include "xattr.h"
@@ -907,6 +908,7 @@ restart:
if (bh)
ll_rw_block(READ_META, 1, &bh);
}
+ dx_propagate_tag(nd, inode);
}
if ((bh = bh_use[ra_ptr++]) == NULL)
goto next;
@@ -2417,6 +2419,7 @@ const struct inode_operations ext3_dir_i
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
+ .sync_flags = ext3_sync_flags,
};
const struct inode_operations ext3_special_inode_operations = {
@@ -2428,4 +2431,5 @@ const struct inode_operations ext3_speci
.removexattr = generic_removexattr,
#endif
.permission = ext3_permission,
+ .sync_flags = ext3_sync_flags,
};
--- a/fs/ext3/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/super.c 2008-04-20 13:26:55.000000000 -0400
@@ -756,7 +756,7 @@ enum {
Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_quota, Opt_noquota,
Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
- Opt_grpquota
+ Opt_grpquota, Opt_tag, Opt_notag, Opt_tagid
};
static match_table_t tokens = {
@@ -807,6 +807,9 @@ static match_table_t tokens = {
{Opt_usrquota, "usrquota"},
{Opt_barrier, "barrier=%u"},
{Opt_resize, "resize"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
{Opt_err, NULL},
};
@@ -899,6 +902,20 @@ static int parse_options (char *options,
case Opt_nouid32:
set_opt (sbi->s_mount_opt, NO_UID32);
break;
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+ case Opt_notag:
+ clear_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
case Opt_nocheck:
clear_opt (sbi->s_mount_opt, CHECK);
break;
@@ -1591,6 +1608,9 @@ static int ext3_fill_super (struct super
NULL, 0))
goto failed_mount;
+ if (EXT3_SB(sb)->s_mount_opt & EXT3_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED;
+
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -2428,6 +2448,12 @@ static int ext3_remount (struct super_bl
if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
ext3_abort(sb, __FUNCTION__, "Abort forced by user");
+ if ((sbi->s_mount_opt & EXT3_MOUNT_TAGGED) &&
+ !(sb->s_flags & MS_TAGGED)) {
+ printk("EXT3-fs: %s: tagging not permitted on remount.\n",
+ sb->s_id);
+ return -EINVAL;
+ }
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
--- a/fs/ext3/symlink.c 2008-04-17 10:32:27.000000000 -0400
+++ a/fs/ext3/symlink.c 2008-04-19 15:14:52.000000000 -0400
@@ -40,6 +40,7 @@ const struct inode_operations ext3_symli
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext3_sync_flags,
};
const struct inode_operations ext3_fast_symlink_inode_operations = {
@@ -51,4 +52,5 @@ const struct inode_operations ext3_fast_
.listxattr = ext3_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext3_sync_flags,
};
--- a/fs/ext3/xattr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext3/xattr.c 2008-04-19 15:14:52.000000000 -0400
@@ -58,6 +58,7 @@
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
+#include <linux/vs_dlimit.h>
#include "xattr.h"
#include "acl.h"
@@ -496,6 +497,7 @@ ext3_xattr_release_block(handle_t *handl
error = ext3_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
+ DLIMIT_FREE_BLOCK(inode, 1);
DQUOT_FREE_BLOCK(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
@@ -769,11 +771,14 @@ inserted:
if (new_bh == bs->bh)
ea_bdebug(new_bh, "keeping");
else {
+ error = -ENOSPC;
+ if (DLIMIT_ALLOC_BLOCK(inode, 1))
+ goto cleanup;
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1))
- goto cleanup;
+ goto cleanup_dlimit;
error = ext3_journal_get_write_access(handle,
new_bh);
if (error)
@@ -849,6 +854,8 @@ cleanup:
cleanup_dquot:
DQUOT_FREE_BLOCK(inode, 1);
+cleanup_dlimit:
+ DLIMIT_FREE_BLOCK(inode, 1);
goto cleanup;
bad_block:
--- a/fs/ext4/balloc.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/balloc.c 2008-04-21 10:23:33.000000000 -0400
@@ -19,6 +19,8 @@
#include <linux/ext4_jbd2.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
#include "group.h"
/*
@@ -810,8 +812,10 @@ void ext4_free_blocks(handle_t *handle,
else
ext4_mb_free_blocks(handle, inode, block, count,
metadata, &dquot_freed_blocks);
- if (dquot_freed_blocks)
+ if (dquot_freed_blocks) {
+ DLIMIT_FREE_BLOCK(inode, dquot_freed_blocks);
DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
+ }
return;
}
@@ -1551,18 +1555,33 @@ out:
*
* Check if filesystem has at least 1 free block available for allocation.
*/
-static int ext4_has_free_blocks(struct ext4_sb_info *sbi)
+static int ext4_has_free_blocks(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t free_blocks, root_blocks;
+ int cond;
free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
root_blocks = ext4_r_blocks_count(sbi->s_es);
- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
+
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "ext4_has_free_blocks(%p): free=%llu, root=%llu",
+ sb, free_blocks, root_blocks);
+
+ DLIMIT_ADJUST_BLOCK(sb, dx_current_tag(), &free_blocks, &root_blocks);
+
+ cond = (free_blocks < root_blocks + 1 &&
+ !capable(CAP_SYS_RESOURCE) &&
sbi->s_resuid != current->fsuid &&
- (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
- return 0;
- }
- return 1;
+ (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid)));
+
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "ext4_has_free_blocks(%p): %llu<%llu+1, %c, %u!=%u r=%d",
+ sb, free_blocks, root_blocks,
+ !capable(CAP_SYS_RESOURCE)?'1':'0',
+ sbi->s_resuid, current->fsuid, cond?0:1);
+
+ return (cond ? 0 : 1);
}
/**
@@ -1579,7 +1598,7 @@ static int ext4_has_free_blocks(struct e
*/
int ext4_should_retry_alloc(struct super_block *sb, int *retries)
{
- if (!ext4_has_free_blocks(EXT4_SB(sb)) || (*retries)++ > 3)
+ if (!ext4_has_free_blocks(sb) || (*retries)++ > 3)
return 0;
jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
@@ -1639,6 +1658,8 @@ ext4_fsblk_t ext4_new_blocks_old(handle_
*errp = -EDQUOT;
return 0;
}
+ if (DLIMIT_ALLOC_BLOCK(inode, num))
+ goto out_dlimit;
sbi = EXT4_SB(sb);
es = EXT4_SB(sb)->s_es;
@@ -1655,7 +1676,7 @@ ext4_fsblk_t ext4_new_blocks_old(handle_
if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
my_rsv = &block_i->rsv_window_node;
- if (!ext4_has_free_blocks(sbi)) {
+ if (!ext4_has_free_blocks(sb)) {
*errp = -ENOSPC;
goto out;
}
@@ -1841,12 +1862,16 @@ allocated:
*errp = 0;
brelse(bitmap_bh);
DQUOT_FREE_BLOCK(inode, *count-num);
+ DLIMIT_FREE_BLOCK(inode, *count-num);
*count = num;
return ret_block;
io_error:
*errp = -EIO;
out:
+ if (!performed_allocation)
+ DLIMIT_FREE_BLOCK(inode, *count);
+out_dlimit:
if (fatal) {
*errp = fatal;
ext4_std_error(sb, fatal);
--- a/fs/ext4/file.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -152,5 +152,6 @@ const struct inode_operations ext4_file_
#endif
.permission = ext4_permission,
.fallocate = ext4_fallocate,
+ .sync_flags = ext4_sync_flags,
};
--- a/fs/ext4/ialloc.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/ialloc.c 2008-04-21 10:29:28.000000000 -0400
@@ -24,6 +24,8 @@
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
#include <asm/byteorder.h>
#include "xattr.h"
@@ -186,6 +188,7 @@ void ext4_free_inode (handle_t *handle,
ext4_xattr_delete_inode(handle, inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
is_directory = S_ISDIR(inode->i_mode);
@@ -513,6 +516,12 @@ struct inode *ext4_new_inode(handle_t *h
inode = new_inode(sb);
if (!inode)
return ERR_PTR(-ENOMEM);
+
+ inode->i_tag = dx_current_fstag(sb);
+ if (DLIMIT_ALLOC_INODE(inode)) {
+ err = -ENOSPC;
+ goto out_dlimit;
+ }
ei = EXT4_I(inode);
sbi = EXT4_SB(sb);
@@ -707,7 +716,8 @@ got:
* newly created directory and file only if -o extent mount option is
* specified
*/
- ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
+ ei->i_flags = EXT4_I(dir)->i_flags &
+ ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL|EXT4_IUNLINK_FL|EXT4_BARRIER_FL);
if (S_ISLNK(mode))
ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
/* dirsync only applies to directories */
@@ -766,6 +776,8 @@ got:
fail:
ext4_std_error(sb, err);
out:
+ DLIMIT_FREE_INODE(inode);
+out_dlimit:
iput(inode);
ret = ERR_PTR(err);
really_out:
@@ -777,6 +789,7 @@ fail_free_drop:
fail_drop:
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
iput(inode);
--- a/fs/ext4/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/inode.c 2008-04-21 10:20:53.000000000 -0400
@@ -36,6 +36,7 @@
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>
+#include <linux/vs_tag.h>
#include "xattr.h"
#include "acl.h"
@@ -2342,7 +2343,7 @@ void ext4_truncate(struct inode *inode)
return;
if (ext4_inode_is_fast_symlink(inode))
return;
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
return;
/*
@@ -2671,13 +2672,20 @@ void ext4_set_inode_flags(struct inode *
{
unsigned int flags = EXT4_I(inode)->i_flags;
- inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+ inode->i_flags &= ~(S_IMMUTABLE | S_IUNLINK | S_BARRIER |
+ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
+
+ if (flags & EXT4_IMMUTABLE_FL)
+ inode->i_flags |= S_IMMUTABLE;
+ if (flags & EXT4_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ if (flags & EXT4_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
+
if (flags & EXT4_SYNC_FL)
inode->i_flags |= S_SYNC;
if (flags & EXT4_APPEND_FL)
inode->i_flags |= S_APPEND;
- if (flags & EXT4_IMMUTABLE_FL)
- inode->i_flags |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
@@ -2702,6 +2710,46 @@ void ext4_get_inode_flags(struct ext4_in
if (flags & S_DIRSYNC)
ei->i_flags |= EXT4_DIRSYNC_FL;
}
+
+int ext4_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+ int err = 0;
+
+ oldflags = EXT4_I(inode)->i_flags;
+ newflags = oldflags & ~(EXT4_IMMUTABLE_FL |
+ EXT4_IUNLINK_FL | EXT4_BARRIER_FL);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= EXT4_IMMUTABLE_FL;
+ if (IS_IUNLINK(inode))
+ newflags |= EXT4_IUNLINK_FL;
+ if (IS_BARRIER(inode))
+ newflags |= EXT4_BARRIER_FL;
+
+ if (oldflags ^ newflags) {
+ handle_t *handle;
+ struct ext4_iloc iloc;
+
+ handle = ext4_journal_start(inode, 1);
+ if (IS_ERR(handle))
+ return PTR_ERR(handle);
+ if (IS_SYNC(inode))
+ handle->h_sync = 1;
+ err = ext4_reserve_inode_write(handle, inode, &iloc);
+ if (err)
+ goto flags_err;
+
+ EXT4_I(inode)->i_flags = newflags;
+ inode->i_ctime = CURRENT_TIME;
+
+ err = ext4_mark_iloc_dirty(handle, inode, &iloc);
+ flags_err:
+ ext4_journal_stop(handle);
+ }
+ return err;
+}
+
static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
struct ext4_inode_info *ei)
{
@@ -2734,6 +2782,8 @@ struct inode *ext4_iget(struct super_blo
struct inode *inode;
long ret;
int block;
+ uid_t uid;
+ gid_t gid;
inode = iget_locked(sb, ino);
if (!inode)
@@ -2754,12 +2804,17 @@ struct inode *ext4_iget(struct super_blo
bh = iloc.bh;
raw_inode = ext4_raw_inode(&iloc);
inode->i_mode = le16_to_cpu(raw_inode->i_mode);
- inode->i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
- inode->i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
+ uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
+ gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
if(!(test_opt (inode->i_sb, NO_UID32))) {
- inode->i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
- inode->i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
+ uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
+ gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
}
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+ le16_to_cpu(raw_inode->i_raw_tag));
+
inode->i_nlink = le16_to_cpu(raw_inode->i_links_count);
ei->i_state = 0;
@@ -2933,6 +2988,8 @@ static int ext4_do_update_inode(handle_t
struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
struct ext4_inode_info *ei = EXT4_I(inode);
struct buffer_head *bh = iloc->bh;
+ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
int err = 0, rc, block;
/* For fields not not tracking in the in-memory inode,
@@ -2943,29 +3000,32 @@ static int ext4_do_update_inode(handle_t
ext4_get_inode_flags(ei);
raw_inode->i_mode = cpu_to_le16(inode->i_mode);
if(!(test_opt(inode->i_sb, NO_UID32))) {
- raw_inode->i_uid_low = cpu_to_le16(low_16_bits(inode->i_uid));
- raw_inode->i_gid_low = cpu_to_le16(low_16_bits(inode->i_gid));
+ raw_inode->i_uid_low = cpu_to_le16(low_16_bits(uid));
+ raw_inode->i_gid_low = cpu_to_le16(low_16_bits(gid));
/*
* Fix up interoperability with old kernels. Otherwise, old inodes get
* re-used with the upper 16 bits of the uid/gid intact
*/
if(!ei->i_dtime) {
raw_inode->i_uid_high =
- cpu_to_le16(high_16_bits(inode->i_uid));
+ cpu_to_le16(high_16_bits(uid));
raw_inode->i_gid_high =
- cpu_to_le16(high_16_bits(inode->i_gid));
+ cpu_to_le16(high_16_bits(gid));
} else {
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
} else {
raw_inode->i_uid_low =
- cpu_to_le16(fs_high2lowuid(inode->i_uid));
+ cpu_to_le16(fs_high2lowuid(uid));
raw_inode->i_gid_low =
- cpu_to_le16(fs_high2lowgid(inode->i_gid));
+ cpu_to_le16(fs_high2lowgid(gid));
raw_inode->i_uid_high = 0;
raw_inode->i_gid_high = 0;
}
+#ifdef CONFIG_TAGGING_INTERN
+ raw_inode->i_raw_tag = cpu_to_le16(inode->i_tag);
+#endif
raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
@@ -3121,7 +3181,8 @@ int ext4_setattr(struct dentry *dentry,
return error;
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
handle_t *handle;
/* (user+group)*(old+new) structure, inode write (sb,
@@ -3143,6 +3204,8 @@ int ext4_setattr(struct dentry *dentry,
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
+ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+ inode->i_tag = attr->ia_tag;
error = ext4_mark_inode_dirty(handle, inode);
ext4_journal_stop(handle);
}
--- a/fs/ext4/ioctl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -8,6 +8,7 @@
*/
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/jbd2.h>
#include <linux/capability.h>
#include <linux/ext4_fs.h>
@@ -15,6 +16,7 @@
#include <linux/time.h>
#include <linux/compat.h>
#include <linux/smp_lock.h>
+#include <linux/vs_tag.h>
#include <asm/uaccess.h>
int ext4_ioctl (struct inode * inode, struct file * filp, unsigned int cmd,
@@ -38,7 +40,8 @@ int ext4_ioctl (struct inode * inode, st
unsigned int oldflags;
unsigned int jflag;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -67,7 +70,9 @@ int ext4_ioctl (struct inode * inode, st
*
* This test looks nicer. Thanks to Pauline Middelink
*/
- if ((flags ^ oldflags) & (EXT4_APPEND_FL | EXT4_IMMUTABLE_FL)) {
+ if ((oldflags & EXT4_IMMUTABLE_FL) ||
+ ((flags ^ oldflags) & (EXT4_APPEND_FL |
+ EXT4_IMMUTABLE_FL | EXT4_IUNLINK_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
return -EPERM;
@@ -129,7 +134,8 @@ flags_err:
if (!is_owner_or_cap(inode))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(generation, (int __user *) arg))
return -EFAULT;
@@ -183,7 +189,8 @@ flags_err:
if (!test_opt(inode->i_sb, RESERVATION) ||!S_ISREG(inode->i_mode))
return -ENOTTY;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -218,7 +225,8 @@ flags_err:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(n_blocks_count, (__u32 __user *)arg))
@@ -239,7 +247,8 @@ flags_err:
if (!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (copy_from_user(&input, (struct ext4_new_group_input __user *)arg,
--- a/fs/ext4/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/namei.c 2008-04-19 15:14:52.000000000 -0400
@@ -36,6 +36,7 @@
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
+#include <linux/vs_tag.h>
#include "namei.h"
#include "xattr.h"
@@ -908,6 +909,7 @@ restart:
if (bh)
ll_rw_block(READ_META, 1, &bh);
}
+ dx_propagate_tag(nd, inode);
}
if ((bh = bh_use[ra_ptr++]) == NULL)
goto next;
@@ -2448,6 +2450,7 @@ const struct inode_operations ext4_dir_i
.removexattr = generic_removexattr,
#endif
.permission = ext4_permission,
+ .sync_flags = ext4_sync_flags,
};
const struct inode_operations ext4_special_inode_operations = {
@@ -2459,4 +2462,5 @@ const struct inode_operations ext4_speci
.removexattr = generic_removexattr,
#endif
.permission = ext4_permission,
+ .sync_flags = ext4_sync_flags,
};
--- a/fs/ext4/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/super.c 2008-04-21 10:31:22.000000000 -0400
@@ -887,6 +887,7 @@ enum {
Opt_ignore, Opt_barrier, Opt_err, Opt_resize, Opt_usrquota,
Opt_grpquota, Opt_extents, Opt_noextents, Opt_i_version,
Opt_mballoc, Opt_nomballoc, Opt_stripe,
+ Opt_tag, Opt_notag, Opt_tagid
};
static match_table_t tokens = {
@@ -944,8 +945,11 @@ static match_table_t tokens = {
{Opt_mballoc, "mballoc"},
{Opt_nomballoc, "nomballoc"},
{Opt_stripe, "stripe=%u"},
- {Opt_err, NULL},
{Opt_resize, "resize"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
+ {Opt_err, NULL},
};
static ext4_fsblk_t get_sb_block(void **data)
@@ -1037,6 +1041,20 @@ static int parse_options (char *options,
case Opt_nouid32:
set_opt (sbi->s_mount_opt, NO_UID32);
break;
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+ case Opt_notag:
+ clear_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ set_opt (sbi->s_mount_opt, TAGGED);
+ break;
+#endif
case Opt_nocheck:
clear_opt (sbi->s_mount_opt, CHECK);
break;
@@ -1909,6 +1927,9 @@ static int ext4_fill_super (struct super
NULL, 0))
goto failed_mount;
+ if (EXT4_SB(sb)->s_mount_opt & EXT4_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED;
+
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
@@ -2828,6 +2849,12 @@ static int ext4_remount (struct super_bl
if (sbi->s_mount_opt & EXT4_MOUNT_ABORT)
ext4_abort(sb, __FUNCTION__, "Abort forced by user");
+ if ((sbi->s_mount_opt & EXT4_MOUNT_TAGGED) &&
+ !(sb->s_flags & MS_TAGGED)) {
+ printk("EXT4-fs: %s: tagging not permitted on remount.\n",
+ sb->s_id);
+ return -EINVAL;
+ }
sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
((sbi->s_mount_opt & EXT4_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
--- a/fs/ext4/symlink.c 2008-04-17 10:32:27.000000000 -0400
+++ a/fs/ext4/symlink.c 2008-04-19 15:14:52.000000000 -0400
@@ -40,6 +40,7 @@ const struct inode_operations ext4_symli
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext4_sync_flags,
};
const struct inode_operations ext4_fast_symlink_inode_operations = {
@@ -51,4 +52,5 @@ const struct inode_operations ext4_fast_
.listxattr = ext4_listxattr,
.removexattr = generic_removexattr,
#endif
+ .sync_flags = ext4_sync_flags,
};
--- a/fs/ext4/xattr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ext4/xattr.c 2008-04-19 15:14:52.000000000 -0400
@@ -58,6 +58,7 @@
#include <linux/mbcache.h>
#include <linux/quotaops.h>
#include <linux/rwsem.h>
+#include <linux/vs_dlimit.h>
#include "xattr.h"
#include "acl.h"
@@ -489,6 +490,7 @@ ext4_xattr_release_block(handle_t *handl
error = ext4_journal_dirty_metadata(handle, bh);
if (IS_SYNC(inode))
handle->h_sync = 1;
+ DLIMIT_FREE_BLOCK(inode, 1);
DQUOT_FREE_BLOCK(inode, 1);
ea_bdebug(bh, "refcount now=%d; releasing",
le32_to_cpu(BHDR(bh)->h_refcount));
@@ -779,11 +781,14 @@ inserted:
if (new_bh == bs->bh)
ea_bdebug(new_bh, "keeping");
else {
+ error = -ENOSPC;
+ if (DLIMIT_ALLOC_BLOCK(inode, 1))
+ goto cleanup;
/* The old block is released after updating
the inode. */
error = -EDQUOT;
if (DQUOT_ALLOC_BLOCK(inode, 1))
- goto cleanup;
+ goto cleanup_dlimit;
error = ext4_journal_get_write_access(handle,
new_bh);
if (error)
@@ -860,6 +865,8 @@ cleanup:
cleanup_dquot:
DQUOT_FREE_BLOCK(inode, 1);
+cleanup_dlimit:
+ DLIMIT_FREE_BLOCK(inode, 1);
goto cleanup;
bad_block:
--- a/fs/fcntl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/fcntl.c 2008-04-19 15:14:52.000000000 -0400
@@ -19,6 +19,7 @@
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_limit.h>
#include <asm/poll.h>
#include <asm/siginfo.h>
@@ -85,6 +86,8 @@ repeat:
error = -EMFILE;
if (newfd >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
goto out;
+ if (!vx_files_avail(1))
+ goto out;
error = expand_files(files, newfd);
if (error < 0)
@@ -128,6 +131,7 @@ static int dupfd(struct file *file, unsi
else
FD_CLR(fd, fdt->close_on_exec);
spin_unlock(&files->file_lock);
+ vx_openfd_inc(fd);
fd_install(fd, file);
} else {
spin_unlock(&files->file_lock);
@@ -180,6 +184,9 @@ asmlinkage long sys_dup2(unsigned int ol
if (tofree)
filp_close(tofree, files);
+ else
+ vx_openfd_inc(newfd); /* fd was unused */
+
err = newfd;
out:
return err;
--- a/fs/file_table.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/file_table.c 2008-04-19 15:14:52.000000000 -0400
@@ -20,6 +20,8 @@
#include <linux/fsnotify.h>
#include <linux/sysctl.h>
#include <linux/percpu_counter.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
#include <asm/atomic.h>
@@ -124,6 +126,8 @@ struct file *get_empty_filp(void)
f->f_gid = tsk->fsgid;
eventpoll_init_file(f);
/* f->f_version: 0 */
+ f->f_xid = vx_current_xid();
+ vx_files_inc(f);
return f;
over:
@@ -239,6 +243,8 @@ void __fput(struct file *file)
if (file->f_mode & FMODE_WRITE)
put_write_access(inode);
put_pid(file->f_owner.pid);
+ vx_files_dec(file);
+ file->f_xid = 0;
file_kill(file);
file->f_path.dentry = NULL;
file->f_path.mnt = NULL;
@@ -304,6 +310,8 @@ void put_filp(struct file *file)
{
if (atomic_dec_and_test(&file->f_count)) {
security_file_free(file);
+ vx_files_dec(file);
+ file->f_xid = 0;
file_kill(file);
file_free(file);
}
--- a/fs/hfsplus/ioctl.c 2008-04-17 10:37:23.000000000 -0400
+++ a/fs/hfsplus/ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -16,6 +16,7 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/xattr.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include "hfsplus_fs.h"
@@ -35,7 +36,8 @@ int hfsplus_ioctl(struct inode *inode, s
flags |= FS_NODUMP_FL; /* EXT2_NODUMP_FL */
return put_user(flags, (int __user *)arg);
case HFSPLUS_IOC_EXT2_SETFLAGS: {
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
--- a/fs/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -124,6 +124,9 @@ static struct inode *alloc_inode(struct
struct address_space * const mapping = &inode->i_data;
inode->i_sb = sb;
+
+ /* essential because of inode slab reuse */
+ inode->i_tag = 0;
inode->i_blkbits = sb->s_blocksize_bits;
inode->i_flags = 0;
atomic_set(&inode->i_count, 1);
@@ -142,6 +145,7 @@ static struct inode *alloc_inode(struct
inode->i_bdev = NULL;
inode->i_cdev = NULL;
inode->i_rdev = 0;
+ inode->i_mdev = 0;
inode->dirtied_when = 0;
if (security_inode_alloc(inode)) {
if (inode->i_sb->s_op->destroy_inode)
@@ -246,6 +250,8 @@ void __iget(struct inode * inode)
inodes_stat.nr_unused--;
}
+EXPORT_SYMBOL_GPL(__iget);
+
/**
* clear_inode - clear an inode
* @inode: inode to clear
@@ -1434,9 +1440,11 @@ void init_special_inode(struct inode *in
if (S_ISCHR(mode)) {
inode->i_fop = &def_chr_fops;
inode->i_rdev = rdev;
+ inode->i_mdev = rdev;
} else if (S_ISBLK(mode)) {
inode->i_fop = &def_blk_fops;
inode->i_rdev = rdev;
+ inode->i_mdev = rdev;
} else if (S_ISFIFO(mode))
inode->i_fop = &def_fifo_fops;
else if (S_ISSOCK(mode))
--- a/fs/ioctl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ioctl.c 2008-04-21 09:25:46.000000000 -0400
@@ -13,6 +13,9 @@
#include <linux/security.h>
#include <linux/module.h>
#include <linux/uaccess.h>
+#include <linux/proc_fs.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_tag.h>
#include <asm/ioctls.h>
--- a/fs/ioprio.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ioprio.c 2008-04-19 15:14:52.000000000 -0400
@@ -26,6 +26,7 @@
#include <linux/syscalls.h>
#include <linux/security.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_base.h>
static int set_task_ioprio(struct task_struct *task, int ioprio)
{
@@ -116,6 +117,8 @@ asmlinkage long sys_ioprio_set(int which
else
pgrp = find_vpid(who);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
ret = set_task_ioprio(p, ioprio);
if (ret)
break;
@@ -205,6 +208,8 @@ asmlinkage long sys_ioprio_get(int which
else
pgrp = find_vpid(who);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
tmpio = get_task_ioprio(p);
if (tmpio < 0)
continue;
--- a/fs/jfs/acl.c 2007-02-04 13:44:54.000000000 -0500
+++ a/fs/jfs/acl.c 2008-04-19 15:14:52.000000000 -0400
@@ -232,7 +232,8 @@ int jfs_setattr(struct dentry *dentry, s
return rc;
if ((iattr->ia_valid & ATTR_UID && iattr->ia_uid != inode->i_uid) ||
- (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid)) {
+ (iattr->ia_valid & ATTR_GID && iattr->ia_gid != inode->i_gid) ||
+ (iattr->ia_valid & ATTR_TAG && iattr->ia_tag != inode->i_tag)) {
if (DQUOT_TRANSFER(inode, iattr))
return -EDQUOT;
}
--- a/fs/jfs/file.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -98,6 +98,7 @@ const struct inode_operations jfs_file_i
.setattr = jfs_setattr,
.permission = jfs_permission,
#endif
+ .sync_flags = jfs_sync_flags,
};
const struct file_operations jfs_file_operations = {
--- a/fs/jfs/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -22,6 +22,7 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
@@ -155,6 +156,7 @@ void jfs_delete_inode(struct inode *inod
DQUOT_INIT(inode);
DQUOT_FREE_INODE(inode);
DQUOT_DROP(inode);
+ DLIMIT_FREE_INODE(inode);
}
clear_inode(inode);
--- a/fs/jfs/ioctl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/ioctl.c 2008-04-21 09:25:22.000000000 -0400
@@ -10,6 +10,7 @@
#include <linux/capability.h>
#include <linux/time.h>
#include <linux/sched.h>
+#include <linux/mount.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -66,7 +67,8 @@ long jfs_ioctl(struct file *filp, unsign
case JFS_IOC_SETFLAGS: {
unsigned int oldflags;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -94,8 +96,8 @@ long jfs_ioctl(struct file *filp, unsign
* the relevant capability.
*/
if ((oldflags & JFS_IMMUTABLE_FL) ||
- ((flags ^ oldflags) &
- (JFS_APPEND_FL | JFS_IMMUTABLE_FL))) {
+ ((flags ^ oldflags) & (JFS_APPEND_FL |
+ JFS_IMMUTABLE_FL | JFS_IUNLINK_FL))) {
if (!capable(CAP_LINUX_IMMUTABLE)) {
mutex_unlock(&inode->i_mutex);
return -EPERM;
--- a/fs/jfs/jfs_dinode.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/jfs_dinode.h 2008-04-19 15:14:52.000000000 -0400
@@ -162,9 +162,12 @@ struct dinode {
#define JFS_APPEND_FL 0x01000000 /* writes to file may only append */
#define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */
-#define JFS_FL_USER_VISIBLE 0x03F80000
+#define JFS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+#define JFS_IUNLINK_FL 0x08000000 /* Immutable unlink */
+
+#define JFS_FL_USER_VISIBLE 0x0FF80000
#define JFS_FL_USER_MODIFIABLE 0x03F80000
-#define JFS_FL_INHERIT 0x03C80000
+#define JFS_FL_INHERIT 0x0BC80000
/* These are identical to EXT[23]_IOC_GETFLAGS/SETFLAGS */
#define JFS_IOC_GETFLAGS _IOR('f', 1, long)
--- a/fs/jfs/jfs_dtree.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/jfs_dtree.c 2008-04-19 15:14:52.000000000 -0400
@@ -102,6 +102,7 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
@@ -383,10 +384,10 @@ static u32 add_index(tid_t tid, struct i
*/
if (DQUOT_ALLOC_BLOCK(ip, sbi->nbperpage))
goto clean_up;
- if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr)) {
- DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
- goto clean_up;
- }
+ if (DLIMIT_ALLOC_BLOCK(ip, sbi->nbperpage))
+ goto clean_up_dquot;
+ if (dbAlloc(ip, 0, sbi->nbperpage, &xaddr))
+ goto clean_up_dlimit;
/*
* Save the table, we're going to overwrite it with the
@@ -480,6 +481,12 @@ static u32 add_index(tid_t tid, struct i
return index;
+ clean_up_dlimit:
+ DLIMIT_FREE_BLOCK(ip, sbi->nbperpage);
+
+ clean_up_dquot:
+ DQUOT_FREE_BLOCK(ip, sbi->nbperpage);
+
clean_up:
jfs_ip->next_index--;
@@ -951,6 +958,7 @@ static int dtSplitUp(tid_t tid,
struct tlock *tlck;
struct lv *lv;
int quota_allocation = 0;
+ int dlimit_allocation = 0;
/* get split page */
smp = split->mp;
@@ -1033,6 +1041,12 @@ static int dtSplitUp(tid_t tid,
}
quota_allocation += n;
+ if (DLIMIT_ALLOC_BLOCK(ip, n)) {
+ rc = -ENOSPC;
+ goto extendOut;
+ }
+ dlimit_allocation += n;
+
if ((rc = dbReAlloc(sbi->ipbmap, xaddr, (s64) xlen,
(s64) n, &nxaddr)))
goto extendOut;
@@ -1306,6 +1320,9 @@ static int dtSplitUp(tid_t tid,
freeKeyName:
kfree(key.name);
+ /* Rollback dlimit allocation */
+ if (rc && dlimit_allocation)
+ DLIMIT_FREE_BLOCK(ip, dlimit_allocation);
/* Rollback quota allocation */
if (rc && quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
@@ -1373,6 +1390,12 @@ static int dtSplitPage(tid_t tid, struct
release_metapage(rmp);
return -EDQUOT;
}
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ DQUOT_FREE_BLOCK(ip, lengthPXD(pxd));
+ release_metapage(rmp);
+ return -ENOSPC;
+ }
jfs_info("dtSplitPage: ip:0x%p smp:0x%p rmp:0x%p", ip, smp, rmp);
@@ -1920,6 +1943,12 @@ static int dtSplitRoot(tid_t tid,
release_metapage(rmp);
return -EDQUOT;
}
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ DQUOT_FREE_BLOCK(ip, lengthPXD(pxd));
+ release_metapage(rmp);
+ return -ENOSPC;
+ }
BT_MARK_DIRTY(rmp, ip);
/*
@@ -2286,6 +2315,8 @@ static int dtDeleteUp(tid_t tid, struct
xlen = lengthPXD(&fp->header.self);
+ /* Free dlimit allocation. */
+ DLIMIT_FREE_BLOCK(ip, xlen);
/* Free quota allocation. */
DQUOT_FREE_BLOCK(ip, xlen);
@@ -2362,6 +2393,8 @@ static int dtDeleteUp(tid_t tid, struct
xlen = lengthPXD(&p->header.self);
+ /* Free dlimit allocation */
+ DLIMIT_FREE_BLOCK(ip, xlen);
/* Free quota allocation */
DQUOT_FREE_BLOCK(ip, xlen);
--- a/fs/jfs/jfs_extent.c 2008-04-17 10:37:23.000000000 -0400
+++ a/fs/jfs/jfs_extent.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,7 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_superblock.h"
@@ -147,6 +148,14 @@ extAlloc(struct inode *ip, s64 xlen, s64
return -EDQUOT;
}
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, nxlen)) {
+ DQUOT_FREE_BLOCK(ip, nxlen);
+ dbFree(ip, nxaddr, (s64) nxlen);
+ mutex_unlock(&JFS_IP(ip)->commit_mutex);
+ return -ENOSPC;
+ }
+
/* determine the value of the extent flag */
xflag = abnr ? XAD_NOTRECORDED : 0;
@@ -164,6 +173,7 @@ extAlloc(struct inode *ip, s64 xlen, s64
*/
if (rc) {
dbFree(ip, nxaddr, nxlen);
+ DLIMIT_FREE_BLOCK(ip, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return (rc);
@@ -261,6 +271,13 @@ int extRealloc(struct inode *ip, s64 nxl
mutex_unlock(&JFS_IP(ip)->commit_mutex);
return -EDQUOT;
}
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, nxlen)) {
+ DQUOT_FREE_BLOCK(ip, nxlen);
+ dbFree(ip, nxaddr, (s64) nxlen);
+ up(&JFS_IP(ip)->commit_sem);
+ return -ENOSPC;
+ }
delta = nxlen - xlen;
@@ -297,6 +314,7 @@ int extRealloc(struct inode *ip, s64 nxl
/* extend the extent */
if ((rc = xtExtend(0, ip, xoff + xlen, (int) nextend, 0))) {
dbFree(ip, xaddr + xlen, delta);
+ DLIMIT_FREE_BLOCK(ip, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
goto exit;
}
@@ -308,6 +326,7 @@ int extRealloc(struct inode *ip, s64 nxl
*/
if ((rc = xtTailgate(0, ip, xoff, (int) ntail, nxaddr, 0))) {
dbFree(ip, nxaddr, nxlen);
+ DLIMIT_FREE_BLOCK(ip, nxlen);
DQUOT_FREE_BLOCK(ip, nxlen);
goto exit;
}
--- a/fs/jfs/jfs_filsys.h 2008-04-17 10:37:23.000000000 -0400
+++ a/fs/jfs/jfs_filsys.h 2008-04-19 15:14:52.000000000 -0400
@@ -263,6 +263,7 @@
#define JFS_NAME_MAX 255
#define JFS_PATH_MAX BPSIZE
+#define JFS_TAGGED 0x00800000 /* Context Tagging */
/*
* file system state (superblock state)
--- a/fs/jfs/jfs_imap.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/jfs_imap.c 2008-04-19 15:14:52.000000000 -0400
@@ -45,6 +45,7 @@
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
+#include <linux/vs_tag.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
@@ -3061,6 +3062,8 @@ static int copy_from_dinode(struct dinod
{
struct jfs_inode_info *jfs_ip = JFS_IP(ip);
struct jfs_sb_info *sbi = JFS_SBI(ip->i_sb);
+ uid_t uid;
+ gid_t gid;
jfs_ip->fileset = le32_to_cpu(dip->di_fileset);
jfs_ip->mode2 = le32_to_cpu(dip->di_mode);
@@ -3081,14 +3084,18 @@ static int copy_from_dinode(struct dinod
}
ip->i_nlink = le32_to_cpu(dip->di_nlink);
- jfs_ip->saved_uid = le32_to_cpu(dip->di_uid);
+ uid = le32_to_cpu(dip->di_uid);
+ gid = le32_to_cpu(dip->di_gid);
+ ip->i_tag = INOTAG_TAG(DX_TAG(ip), uid, gid, 0);
+
+ jfs_ip->saved_uid = INOTAG_UID(DX_TAG(ip), uid, gid);
if (sbi->uid == -1)
ip->i_uid = jfs_ip->saved_uid;
else {
ip->i_uid = sbi->uid;
}
- jfs_ip->saved_gid = le32_to_cpu(dip->di_gid);
+ jfs_ip->saved_gid = INOTAG_GID(DX_TAG(ip), uid, gid);
if (sbi->gid == -1)
ip->i_gid = jfs_ip->saved_gid;
else {
@@ -3153,14 +3160,12 @@ static void copy_to_dinode(struct dinode
dip->di_size = cpu_to_le64(ip->i_size);
dip->di_nblocks = cpu_to_le64(PBLK2LBLK(ip->i_sb, ip->i_blocks));
dip->di_nlink = cpu_to_le32(ip->i_nlink);
- if (sbi->uid == -1)
- dip->di_uid = cpu_to_le32(ip->i_uid);
- else
- dip->di_uid = cpu_to_le32(jfs_ip->saved_uid);
- if (sbi->gid == -1)
- dip->di_gid = cpu_to_le32(ip->i_gid);
- else
- dip->di_gid = cpu_to_le32(jfs_ip->saved_gid);
+
+ dip->di_uid = cpu_to_le32(TAGINO_UID(DX_TAG(ip),
+ (sbi->uid == -1) ? ip->i_uid : jfs_ip->saved_uid, ip->i_tag));
+ dip->di_gid = cpu_to_le32(TAGINO_GID(DX_TAG(ip),
+ (sbi->gid == -1) ? ip->i_gid : jfs_ip->saved_gid, ip->i_tag));
+
jfs_get_inode_flags(jfs_ip);
/*
* mode2 is only needed for storing the higher order bits.
--- a/fs/jfs/jfs_inode.c 2008-04-17 10:33:02.000000000 -0400
+++ a/fs/jfs/jfs_inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,8 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
@@ -30,19 +32,47 @@ void jfs_set_inode_flags(struct inode *i
{
unsigned int flags = JFS_IP(inode)->mode2;
- inode->i_flags &= ~(S_IMMUTABLE | S_APPEND |
- S_NOATIME | S_DIRSYNC | S_SYNC);
+ inode->i_flags &= ~(S_IMMUTABLE | S_IUNLINK | S_BARRIER |
+ S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC);
if (flags & JFS_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
+ if (flags & JFS_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ if (flags & JFS_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
+
+ if (flags & JFS_SYNC_FL)
+ inode->i_flags |= S_SYNC;
if (flags & JFS_APPEND_FL)
inode->i_flags |= S_APPEND;
if (flags & JFS_NOATIME_FL)
inode->i_flags |= S_NOATIME;
if (flags & JFS_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
- if (flags & JFS_SYNC_FL)
- inode->i_flags |= S_SYNC;
+}
+
+int jfs_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+
+ oldflags = JFS_IP(inode)->mode2;
+ newflags = oldflags & ~(JFS_IMMUTABLE_FL |
+ JFS_IUNLINK_FL | JFS_BARRIER_FL);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= JFS_IMMUTABLE_FL;
+ if (IS_IUNLINK(inode))
+ newflags |= JFS_IUNLINK_FL;
+ if (IS_BARRIER(inode))
+ newflags |= JFS_BARRIER_FL;
+
+ if (oldflags ^ newflags) {
+ JFS_IP(inode)->mode2 = newflags;
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ }
+ return 0;
}
void jfs_get_inode_flags(struct jfs_inode_info *jfs_ip)
@@ -108,10 +138,17 @@ struct inode *ialloc(struct inode *paren
jfs_inode->saved_uid = inode->i_uid;
jfs_inode->saved_gid = inode->i_gid;
+ inode->i_tag = dx_current_fstag(sb);
+ if (DLIMIT_ALLOC_INODE(inode)) {
+ iput(inode);
+ return ERR_PTR(-ENOSPC);
+ }
+
/*
* Allocate inode to quota.
*/
if (DQUOT_ALLOC_INODE(inode)) {
+ DLIMIT_FREE_INODE(inode);
DQUOT_DROP(inode);
inode->i_flags |= S_NOQUOTA;
inode->i_nlink = 0;
--- a/fs/jfs/jfs_inode.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/jfs_inode.h 2008-04-19 15:14:52.000000000 -0400
@@ -39,6 +39,7 @@ extern struct dentry *jfs_fh_to_dentry(s
extern struct dentry *jfs_fh_to_parent(struct super_block *sb, struct fid *fid,
int fh_len, int fh_type);
extern void jfs_set_inode_flags(struct inode *);
+extern int jfs_sync_flags(struct inode *);
extern int jfs_get_block(struct inode *, sector_t, struct buffer_head *, int);
extern const struct address_space_operations jfs_aops;
--- a/fs/jfs/jfs_xtree.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/jfs_xtree.c 2008-04-19 15:14:52.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
@@ -846,7 +847,12 @@ int xtInsert(tid_t tid, /* transaction
hint = 0;
if ((rc = DQUOT_ALLOC_BLOCK(ip, xlen)))
goto out;
+ if ((rc = DLIMIT_ALLOC_BLOCK(ip, xlen))) {
+ DQUOT_FREE_BLOCK(ip, xlen);
+ goto out;
+ }
if ((rc = dbAlloc(ip, hint, (s64) xlen, &xaddr))) {
+ DLIMIT_FREE_BLOCK(ip, xlen);
DQUOT_FREE_BLOCK(ip, xlen);
goto out;
}
@@ -876,6 +882,7 @@ int xtInsert(tid_t tid, /* transaction
/* undo data extent allocation */
if (*xaddrp == 0) {
dbFree(ip, xaddr, (s64) xlen);
+ DLIMIT_FREE_BLOCK(ip, xlen);
DQUOT_FREE_BLOCK(ip, xlen);
}
return rc;
@@ -1236,6 +1243,7 @@ xtSplitPage(tid_t tid, struct inode *ip,
struct tlock *tlck;
struct xtlock *sxtlck = NULL, *rxtlck = NULL;
int quota_allocation = 0;
+ int dlimit_allocation = 0;
smp = split->mp;
sp = XT_PAGE(ip, smp);
@@ -1255,6 +1263,13 @@ xtSplitPage(tid_t tid, struct inode *ip,
quota_allocation += lengthPXD(pxd);
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ rc = -ENOSPC;
+ goto clean_up;
+ }
+ dlimit_allocation += lengthPXD(pxd);
+
/*
* allocate the new right page for the split
*/
@@ -1456,6 +1471,9 @@ xtSplitPage(tid_t tid, struct inode *ip,
clean_up:
+ /* Rollback dlimit allocation. */
+ if (dlimit_allocation)
+ DLIMIT_FREE_BLOCK(ip, dlimit_allocation);
/* Rollback quota allocation. */
if (quota_allocation)
DQUOT_FREE_BLOCK(ip, quota_allocation);
@@ -1519,6 +1537,12 @@ xtSplitRoot(tid_t tid,
release_metapage(rmp);
return -EDQUOT;
}
+ /* Allocate blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, lengthPXD(pxd))) {
+ DQUOT_FREE_BLOCK(ip, lengthPXD(pxd));
+ release_metapage(rmp);
+ return -ENOSPC;
+ }
jfs_info("xtSplitRoot: ip:0x%p rmp:0x%p", ip, rmp);
@@ -3948,6 +3972,8 @@ s64 xtTruncate(tid_t tid, struct inode *
else
ip->i_size = newsize;
+ /* update dlimit allocation to reflect freed blocks */
+ DLIMIT_FREE_BLOCK(ip, nfreed);
/* update quota allocation to reflect freed blocks */
DQUOT_FREE_BLOCK(ip, nfreed);
--- a/fs/jfs/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/namei.c 2008-04-19 15:14:52.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/ctype.h>
#include <linux/quotaops.h>
#include <linux/exportfs.h>
+#include <linux/vs_tag.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_inode.h"
@@ -1468,6 +1469,7 @@ static struct dentry *jfs_lookup(struct
return ERR_CAST(ip);
}
+ dx_propagate_tag(nd, ip);
dentry = d_splice_alias(ip, dentry);
if (dentry && (JFS_SBI(dip->i_sb)->mntflag & JFS_OS2))
@@ -1550,6 +1552,7 @@ const struct inode_operations jfs_dir_in
.setattr = jfs_setattr,
.permission = jfs_permission,
#endif
+ .sync_flags = jfs_sync_flags,
};
const struct file_operations jfs_dir_operations = {
--- a/fs/jfs/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/jfs/super.c 2008-04-19 15:14:52.000000000 -0400
@@ -195,7 +195,8 @@ static void jfs_put_super(struct super_b
enum {
Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
- Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
+ Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask,
+ Opt_tag, Opt_notag, Opt_tagid
};
static match_table_t tokens = {
@@ -205,6 +206,10 @@ static match_table_t tokens = {
{Opt_resize, "resize=%u"},
{Opt_resize_nosize, "resize"},
{Opt_errors, "errors=%s"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
+ {Opt_tag, "tagxid"},
{Opt_ignore, "noquota"},
{Opt_ignore, "quota"},
{Opt_usrquota, "usrquota"},
@@ -339,6 +344,20 @@ static int parse_options(char *options,
}
break;
}
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ *flag |= JFS_TAGGED;
+ break;
+ case Opt_notag:
+ *flag &= JFS_TAGGED;
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ *flag |= JFS_TAGGED;
+ break;
+#endif
default:
printk("jfs: Unrecognized mount option \"%s\" "
" or missing value\n", p);
@@ -369,6 +388,13 @@ static int jfs_remount(struct super_bloc
if (!parse_options(data, sb, &newLVSize, &flag)) {
return -EINVAL;
}
+
+ if ((flag & JFS_TAGGED) && !(sb->s_flags & MS_TAGGED)) {
+ printk(KERN_ERR "JFS: %s: tagging not permitted on remount.\n",
+ sb->s_id);
+ return -EINVAL;
+ }
+
if (newLVSize) {
if (sb->s_flags & MS_RDONLY) {
printk(KERN_ERR
@@ -440,6 +466,9 @@ static int jfs_fill_super(struct super_b
#ifdef CONFIG_JFS_POSIX_ACL
sb->s_flags |= MS_POSIXACL;
#endif
+ /* map mount option tagxid */
+ if (sbi->flag & JFS_TAGGED)
+ sb->s_flags |= MS_TAGGED;
if (newLVSize) {
printk(KERN_ERR "resize option for remount only\n");
--- a/fs/jfs/xattr.c 2008-04-17 10:37:23.000000000 -0400
+++ a/fs/jfs/xattr.c 2008-04-19 15:14:52.000000000 -0400
@@ -23,6 +23,7 @@
#include <linux/posix_acl_xattr.h>
#include <linux/quotaops.h>
#include <linux/security.h>
+#include <linux/vs_dlimit.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
@@ -263,9 +264,16 @@ static int ea_write(struct inode *ip, st
if (DQUOT_ALLOC_BLOCK(ip, nblocks)) {
return -EDQUOT;
}
+ /* Allocate new blocks to dlimit. */
+ if (DLIMIT_ALLOC_BLOCK(ip, nblocks)) {
+ DQUOT_FREE_BLOCK(ip, nblocks);
+ return -ENOSPC;
+ }
rc = dbAlloc(ip, INOHINT(ip), nblocks, &blkno);
if (rc) {
+ /*Rollback dlimit allocation. */
+ DLIMIT_FREE_BLOCK(ip, nblocks);
/*Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
return rc;
@@ -332,6 +340,8 @@ static int ea_write(struct inode *ip, st
failed:
/* Rollback quota allocation. */
+ DLIMIT_FREE_BLOCK(ip, nblocks);
+ /* Rollback quota allocation. */
DQUOT_FREE_BLOCK(ip, nblocks);
dbFree(ip, blkno, nblocks);
@@ -468,6 +478,7 @@ static int ea_get(struct inode *inode, s
s64 blkno;
int rc;
int quota_allocation = 0;
+ int dlimit_allocation = 0;
/* When fsck.jfs clears a bad ea, it doesn't clear the size */
if (ji->ea.flag == 0)
@@ -543,6 +554,12 @@ static int ea_get(struct inode *inode, s
quota_allocation = blocks_needed;
+ /* Allocate new blocks to dlimit. */
+ rc = -ENOSPC;
+ if (DLIMIT_ALLOC_BLOCK(inode, blocks_needed))
+ goto clean_up;
+ dlimit_allocation = blocks_needed;
+
rc = dbAlloc(inode, INOHINT(inode), (s64) blocks_needed,
&blkno);
if (rc)
@@ -600,6 +617,9 @@ static int ea_get(struct inode *inode, s
return ea_size;
clean_up:
+ /* Rollback dlimit allocation */
+ if (dlimit_allocation)
+ DLIMIT_FREE_BLOCK(inode, dlimit_allocation);
/* Rollback quota allocation */
if (quota_allocation)
DQUOT_FREE_BLOCK(inode, quota_allocation);
@@ -676,8 +696,10 @@ static int ea_put(tid_t tid, struct inod
}
/* If old blocks exist, they must be removed from quota allocation. */
- if (old_blocks)
+ if (old_blocks) {
+ DLIMIT_FREE_BLOCK(inode, old_blocks);
DQUOT_FREE_BLOCK(inode, old_blocks);
+ }
inode->i_ctime = CURRENT_TIME;
--- a/fs/libfs.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/libfs.c 2008-04-19 15:14:52.000000000 -0400
@@ -125,7 +125,8 @@ static inline unsigned char dt_type(stru
* both impossible due to the lock on directory.
*/
-int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
+static inline int do_dcache_readdir_filter(struct file *filp,
+ void *dirent, filldir_t filldir, int (*filter)(struct dentry *dentry))
{
struct dentry *dentry = filp->f_path.dentry;
struct dentry *cursor = filp->private_data;
@@ -158,6 +159,8 @@ int dcache_readdir(struct file * filp, v
next = list_entry(p, struct dentry, d_u.d_child);
if (d_unhashed(next) || !next->d_inode)
continue;
+ if (filter && !filter(next))
+ continue;
spin_unlock(&dcache_lock);
if (filldir(dirent, next->d_name.name,
@@ -176,6 +179,18 @@ int dcache_readdir(struct file * filp, v
return 0;
}
+int dcache_readdir(struct file *filp, void *dirent, filldir_t filldir)
+{
+ return do_dcache_readdir_filter(filp, dirent, filldir, NULL);
+}
+
+int dcache_readdir_filter(struct file *filp, void *dirent, filldir_t filldir,
+ int (*filter)(struct dentry *))
+{
+ return do_dcache_readdir_filter(filp, dirent, filldir, filter);
+}
+
+
ssize_t generic_read_dir(struct file *filp, char __user *buf, size_t siz, loff_t *ppos)
{
return -EISDIR;
@@ -778,6 +793,7 @@ EXPORT_SYMBOL(dcache_dir_close);
EXPORT_SYMBOL(dcache_dir_lseek);
EXPORT_SYMBOL(dcache_dir_open);
EXPORT_SYMBOL(dcache_readdir);
+EXPORT_SYMBOL(dcache_readdir_filter);
EXPORT_SYMBOL(generic_read_dir);
EXPORT_SYMBOL(get_sb_pseudo);
EXPORT_SYMBOL(simple_write_begin);
--- a/fs/locks.c 2008-05-21 14:30:05.000000000 -0400
+++ a/fs/locks.c 2008-05-21 14:30:41.000000000 -0400
@@ -126,6 +126,8 @@
#include <linux/time.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
#include <asm/semaphore.h>
#include <asm/uaccess.h>
@@ -148,6 +150,8 @@ static struct kmem_cache *filelock_cache
/* Allocate an empty lock structure. */
static struct file_lock *locks_alloc_lock(void)
{
+ if (!vx_locks_avail(1))
+ return NULL;
return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
}
@@ -173,6 +177,7 @@ static void locks_free_lock(struct file_
BUG_ON(!list_empty(&fl->fl_block));
BUG_ON(!list_empty(&fl->fl_link));
+ vx_locks_dec(fl);
locks_release_private(fl);
kmem_cache_free(filelock_cache, fl);
}
@@ -193,6 +198,7 @@ void locks_init_lock(struct file_lock *f
fl->fl_start = fl->fl_end = 0;
fl->fl_ops = NULL;
fl->fl_lmops = NULL;
+ fl->fl_xid = -1;
}
EXPORT_SYMBOL(locks_init_lock);
@@ -246,6 +252,7 @@ void locks_copy_lock(struct file_lock *n
new->fl_file = fl->fl_file;
new->fl_ops = fl->fl_ops;
new->fl_lmops = fl->fl_lmops;
+ new->fl_xid = fl->fl_xid;
locks_copy_private(new, fl);
}
@@ -284,6 +291,11 @@ static int flock_make_lock(struct file *
fl->fl_flags = FL_FLOCK;
fl->fl_type = type;
fl->fl_end = OFFSET_MAX;
+
+ vxd_assert(filp->f_xid == vx_current_xid(),
+ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+ fl->fl_xid = filp->f_xid;
+ vx_locks_inc(fl);
*lock = fl;
return 0;
@@ -449,6 +461,7 @@ static int lease_init(struct file *filp,
fl->fl_owner = current->files;
fl->fl_pid = current->tgid;
+ fl->fl_xid = vx_current_xid();
fl->fl_file = filp;
fl->fl_flags = FL_LEASE;
@@ -468,6 +481,11 @@ static struct file_lock *lease_alloc(str
if (fl == NULL)
return ERR_PTR(error);
+ fl->fl_xid = vx_current_xid();
+ if (filp)
+ vxd_assert(filp->f_xid == fl->fl_xid,
+ "f_xid(%d) == fl_xid(%d)", filp->f_xid, fl->fl_xid);
+ vx_locks_inc(fl);
error = lease_init(filp, type, fl);
if (error) {
locks_free_lock(fl);
@@ -774,6 +792,7 @@ static int flock_lock_file(struct file *
if (found)
cond_resched();
+ new_fl->fl_xid = -1;
find_conflict:
for_each_lock(inode, before) {
struct file_lock *fl = *before;
@@ -792,6 +811,7 @@ find_conflict:
goto out;
locks_copy_lock(new_fl, request);
locks_insert_lock(before, new_fl);
+ vx_locks_inc(new_fl);
new_fl = NULL;
error = 0;
@@ -802,7 +822,8 @@ out:
return error;
}
-static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
+static int __posix_lock_file(struct inode *inode, struct file_lock *request,
+ struct file_lock *conflock, xid_t xid)
{
struct file_lock *fl;
struct file_lock *new_fl = NULL;
@@ -812,6 +833,8 @@ static int __posix_lock_file(struct inod
struct file_lock **before;
int error, added = 0;
+ vxd_assert(xid == vx_current_xid(),
+ "xid(%d) == current(%d)", xid, vx_current_xid());
/*
* We may need two file_lock structures for this operation,
* so we get them in advance to avoid races.
@@ -822,7 +845,11 @@ static int __posix_lock_file(struct inod
(request->fl_type != F_UNLCK ||
request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
new_fl = locks_alloc_lock();
+ new_fl->fl_xid = xid;
+ vx_locks_inc(new_fl);
new_fl2 = locks_alloc_lock();
+ new_fl2->fl_xid = xid;
+ vx_locks_inc(new_fl2);
}
lock_kernel();
@@ -1021,7 +1048,8 @@ static int __posix_lock_file(struct inod
int posix_lock_file(struct file *filp, struct file_lock *fl,
struct file_lock *conflock)
{
- return __posix_lock_file(filp->f_path.dentry->d_inode, fl, conflock);
+ return __posix_lock_file(filp->f_path.dentry->d_inode,
+ fl, conflock, filp->f_xid);
}
EXPORT_SYMBOL(posix_lock_file);
@@ -1111,7 +1139,7 @@ int locks_mandatory_area(int read_write,
fl.fl_end = offset + count - 1;
for (;;) {
- error = __posix_lock_file(inode, &fl, NULL);
+ error = __posix_lock_file(inode, &fl, NULL, filp->f_xid);
if (error != -EAGAIN)
break;
if (!(fl.fl_flags & FL_SLEEP))
@@ -1425,6 +1453,7 @@ int generic_setlease(struct file *filp,
locks_copy_lock(new_fl, lease);
locks_insert_lock(before, new_fl);
+ vx_locks_inc(new_fl);
*flp = new_fl;
return 0;
@@ -1756,6 +1785,11 @@ int fcntl_setlk(unsigned int fd, struct
if (file_lock == NULL)
return -ENOLCK;
+ vxd_assert(filp->f_xid == vx_current_xid(),
+ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+ file_lock->fl_xid = filp->f_xid;
+ vx_locks_inc(file_lock);
+
/*
* This might block, so we do it before checking the inode.
*/
@@ -1893,6 +1927,11 @@ int fcntl_setlk64(unsigned int fd, struc
if (file_lock == NULL)
return -ENOLCK;
+ vxd_assert(filp->f_xid == vx_current_xid(),
+ "f_xid(%d) == current(%d)", filp->f_xid, vx_current_xid());
+ file_lock->fl_xid = filp->f_xid;
+ vx_locks_inc(file_lock);
+
/*
* This might block, so we do it before checking the inode.
*/
@@ -2176,8 +2215,11 @@ static int locks_show(struct seq_file *f
lock_get_status(f, fl, (long)f->private, "");
- list_for_each_entry(bfl, &fl->fl_block, fl_block)
+ list_for_each_entry(bfl, &fl->fl_block, fl_block) {
+ if (!vx_check(fl->fl_xid, VS_WATCH_P | VS_IDENT))
+ continue;
lock_get_status(f, bfl, (long)f->private, " ->");
+ }
f->private++;
return 0;
--- a/fs/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/namei.c 2008-04-22 19:23:22.000000000 -0400
@@ -30,6 +30,13 @@
#include <linux/capability.h>
#include <linux/file.h>
#include <linux/fcntl.h>
+#include <linux/proc_fs.h>
+#include <linux/vserver/inode.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
+#include <linux/vs_device.h>
+#include <linux/vs_context.h>
#include <asm/namei.h>
#include <asm/uaccess.h>
@@ -225,6 +232,28 @@ int generic_permission(struct inode *ino
return -EACCES;
}
+static inline int dx_barrier(struct inode *inode)
+{
+ if (IS_BARRIER(inode) && !vx_check(0, VS_ADMIN)) {
+ vxwprintk_task(1, "did hit the barrier.");
+ return 1;
+ }
+ return 0;
+}
+
+static inline int dx_permission(struct inode *inode, int mask, struct nameidata *nd)
+{
+ if (dx_barrier(inode))
+ return -EACCES;
+ if (dx_notagcheck(nd) ||
+ dx_check(inode->i_tag, DX_HOSTID|DX_ADMIN|DX_WATCH|DX_IDENT))
+ return 0;
+
+ vxwprintk_task(1, "denied access to %p[#%d,%lu] <20>%s<>.",
+ inode, inode->i_tag, inode->i_ino, vxd_cond_path(nd));
+ return -EACCES;
+}
+
int permission(struct inode *inode, int mask, struct nameidata *nd)
{
int retval, submask;
@@ -239,14 +268,14 @@ int permission(struct inode *inode, int
/*
* Nobody gets write access to a read-only fs.
*/
- if (IS_RDONLY(inode) &&
+ if ((IS_RDONLY(inode) || (nd && MNT_IS_RDONLY(nd->path.mnt))) &&
(S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
return -EROFS;
/*
* Nobody gets write access to an immutable file.
*/
- if (IS_IMMUTABLE(inode))
+ if (IS_IMMUTABLE(inode) && !IS_COW(inode))
return -EACCES;
}
@@ -261,6 +290,11 @@ int permission(struct inode *inode, int
/* Ordinary permission routines do not understand MAY_APPEND. */
submask = mask & ~MAY_APPEND;
+ if ((inode->i_sb->s_magic != DEVPTS_SUPER_MAGIC) &&
+ (inode->i_sb->s_magic != PROC_SUPER_MAGIC) &&
+ (retval = dx_permission(inode, mask, nd)))
+ return retval;
+
if (inode->i_op && inode->i_op->permission) {
retval = inode->i_op->permission(inode, submask, nd);
if (!retval) {
@@ -459,6 +493,8 @@ static int exec_permission_lite(struct i
{
umode_t mode = inode->i_mode;
+ if (dx_barrier(inode))
+ return -EACCES;
if (inode->i_op && inode->i_op->permission)
return -EAGAIN;
@@ -789,7 +825,8 @@ static __always_inline void follow_dotdo
if (nd->path.dentry == fs->root.dentry &&
nd->path.mnt == fs->root.mnt) {
read_unlock(&fs->lock);
- break;
+ /* for sane '/' avoid follow_mount() */
+ return;
}
read_unlock(&fs->lock);
spin_lock(&dcache_lock);
@@ -826,16 +863,39 @@ static int do_lookup(struct nameidata *n
{
struct vfsmount *mnt = nd->path.mnt;
struct dentry *dentry = __d_lookup(nd->path.dentry, name);
+ struct inode *inode;
if (!dentry)
goto need_lookup;
if (dentry->d_op && dentry->d_op->d_revalidate)
goto need_revalidate;
+ inode = dentry->d_inode;
+ if (!inode)
+ goto done;
+
+ if (inode->i_sb->s_magic == PROC_SUPER_MAGIC) {
+ struct proc_dir_entry *de = PDE(inode);
+
+ if (de && !vx_hide_check(0, de->vx_flags))
+ goto hidden;
+ } else if (inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC) {
+ if (!vx_check((xid_t)inode->i_tag, VS_WATCH_P | VS_IDENT))
+ goto hidden;
+ } else {
+ if (!dx_notagcheck(nd) && !dx_check(inode->i_tag,
+ DX_WATCH | DX_ADMIN | DX_HOSTID | DX_IDENT))
+ goto hidden;
+ }
done:
path->mnt = mnt;
path->dentry = dentry;
__follow_mount(path);
return 0;
+hidden:
+ vxwprintk_task(1, "did lookup hidden %p[#%d,%lu] <20>%s<>.",
+ inode, inode->i_tag, inode->i_ino, vxd_path(path));
+ dput(dentry);
+ return -ENOENT;
need_lookup:
dentry = real_lookup(nd->path.dentry, name, nd);
@@ -1464,7 +1524,8 @@ static inline int check_sticky(struct in
* 10. We don't allow removal of NFS sillyrenamed files; it's handled by
* nfs_async_unlink().
*/
-static int may_delete(struct inode *dir,struct dentry *victim,int isdir)
+static int may_delete(struct inode *dir, struct dentry *victim,
+ int isdir, struct nameidata *nd)
{
int error;
@@ -1474,13 +1535,13 @@ static int may_delete(struct inode *dir,
BUG_ON(victim->d_parent->d_inode != dir);
audit_inode_child(victim->d_name.name, victim, dir);
- error = permission(dir,MAY_WRITE | MAY_EXEC, NULL);
+ error = permission(dir,MAY_WRITE | MAY_EXEC, nd);
if (error)
return error;
if (IS_APPEND(dir))
return -EPERM;
if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
- IS_IMMUTABLE(victim->d_inode))
+ IS_IXORUNLINK(victim->d_inode))
return -EPERM;
if (isdir) {
if (!S_ISDIR(victim->d_inode->i_mode))
@@ -1626,6 +1687,14 @@ int may_open(struct nameidata *nd, int a
} else if (IS_RDONLY(inode) && (acc_mode & MAY_WRITE))
return -EROFS;
+#ifdef CONFIG_VSERVER_COWBL
+ if (IS_COW(inode) && (flag & FMODE_WRITE)) {
+ if (IS_COW_LINK(inode))
+ return -EMLINK;
+ inode->i_flags &= ~(S_IUNLINK|S_IMMUTABLE);
+ mark_inode_dirty(inode);
+ }
+#endif
error = vfs_permission(nd, acc_mode);
if (error)
return error;
@@ -1717,6 +1786,11 @@ int open_namei(int dfd, const char *path
struct dentry *dir;
int count = 0;
+#ifdef CONFIG_VSERVER_COWBL
+ int rflag = flag;
+ int rmode = mode;
+restart:
+#endif
acc_mode = ACC_MODE(flag);
/* O_TRUNC implies we need access checks for write permissions */
@@ -1810,6 +1884,22 @@ do_last:
goto exit;
ok:
error = may_open(nd, acc_mode, flag);
+#ifdef CONFIG_VSERVER_COWBL
+ if (error == -EMLINK) {
+ struct dentry *dentry;
+ dentry = cow_break_link(pathname);
+ if (IS_ERR(dentry)) {
+ error = PTR_ERR(dentry);
+ goto exit;
+ }
+ dput(dentry);
+ release_open_intent(nd);
+ path_put(&nd->path);
+ flag = rflag;
+ mode = rmode;
+ goto restart;
+ }
+#endif
if (error)
goto exit;
return 0;
@@ -1921,16 +2011,25 @@ fail:
}
EXPORT_SYMBOL_GPL(lookup_create);
-int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int vfs_mknod(struct inode *dir, struct dentry *dentry,
+ int mode, dev_t dev, struct nameidata *nd)
{
- int error = may_create(dir, dentry, NULL);
+ int error = may_create(dir, dentry, nd);
if (error)
return error;
- if ((S_ISCHR(mode) || S_ISBLK(mode)) && !capable(CAP_MKNOD))
+ if (!(S_ISCHR(mode) || S_ISBLK(mode)))
+ goto okay;
+
+ if (!capable(CAP_MKNOD))
return -EPERM;
+ if (S_ISCHR(mode) && !vs_chrdev_perm(dev, DATTR_CREATE))
+ return -EPERM;
+ if (S_ISBLK(mode) && !vs_blkdev_perm(dev, DATTR_CREATE))
+ return -EPERM;
+okay:
if (!dir->i_op || !dir->i_op->mknod)
return -EPERM;
@@ -1973,11 +2072,12 @@ asmlinkage long sys_mknodat(int dfd, con
error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
break;
case S_IFCHR: case S_IFBLK:
- error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
- new_decode_dev(dev));
+ error = vfs_mknod(nd.path.dentry->d_inode, dentry, mode,
+ new_decode_dev(dev), &nd);
break;
case S_IFIFO: case S_IFSOCK:
- error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
+ error = vfs_mknod(nd.path.dentry->d_inode, dentry, mode,
+ 0, &nd);
break;
case S_IFDIR:
error = -EPERM;
@@ -2000,9 +2100,10 @@ asmlinkage long sys_mknod(const char __u
return sys_mknodat(AT_FDCWD, filename, mode, dev);
}
-int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int vfs_mkdir(struct inode *dir, struct dentry *dentry,
+ int mode, struct nameidata *nd)
{
- int error = may_create(dir, dentry, NULL);
+ int error = may_create(dir, dentry, nd);
if (error)
return error;
@@ -2044,7 +2145,7 @@ asmlinkage long sys_mkdirat(int dfd, con
if (!IS_POSIXACL(nd.path.dentry->d_inode))
mode &= ~current->fs->umask;
- error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
+ error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode, &nd);
dput(dentry);
out_unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
@@ -2087,9 +2188,10 @@ void dentry_unhash(struct dentry *dentry
spin_unlock(&dcache_lock);
}
-int vfs_rmdir(struct inode *dir, struct dentry *dentry)
+int vfs_rmdir(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
{
- int error = may_delete(dir, dentry, 1);
+ int error = may_delete(dir, dentry, 1, nd);
if (error)
return error;
@@ -2151,7 +2253,7 @@ static long do_rmdir(int dfd, const char
error = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto exit2;
- error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+ error = vfs_rmdir(nd.path.dentry->d_inode, dentry, &nd);
dput(dentry);
exit2:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
@@ -2167,9 +2269,10 @@ asmlinkage long sys_rmdir(const char __u
return do_rmdir(AT_FDCWD, pathname);
}
-int vfs_unlink(struct inode *dir, struct dentry *dentry)
+int vfs_unlink(struct inode *dir, struct dentry *dentry,
+ struct nameidata *nd)
{
- int error = may_delete(dir, dentry, 0);
+ int error = may_delete(dir, dentry, 0, nd);
if (error)
return error;
@@ -2232,7 +2335,7 @@ static long do_unlinkat(int dfd, const c
inode = dentry->d_inode;
if (inode)
atomic_inc(&inode->i_count);
- error = vfs_unlink(nd.path.dentry->d_inode, dentry);
+ error = vfs_unlink(nd.path.dentry->d_inode, dentry, &nd);
exit2:
dput(dentry);
}
@@ -2267,9 +2370,10 @@ asmlinkage long sys_unlink(const char __
return do_unlinkat(AT_FDCWD, pathname);
}
-int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname, int mode)
+int vfs_symlink(struct inode *dir, struct dentry *dentry,
+ const char *oldname, int mode, struct nameidata *nd)
{
- int error = may_create(dir, dentry, NULL);
+ int error = may_create(dir, dentry, nd);
if (error)
return error;
@@ -2313,7 +2417,8 @@ asmlinkage long sys_symlinkat(const char
if (IS_ERR(dentry))
goto out_unlock;
- error = vfs_symlink(nd.path.dentry->d_inode, dentry, from, S_IALLUGO);
+ error = vfs_symlink(nd.path.dentry->d_inode, dentry, from,
+ S_IALLUGO, &nd);
dput(dentry);
out_unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
@@ -2330,7 +2435,8 @@ asmlinkage long sys_symlink(const char _
return sys_symlinkat(oldname, AT_FDCWD, newname);
}
-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
+int vfs_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry, struct nameidata *nd)
{
struct inode *inode = old_dentry->d_inode;
int error;
@@ -2338,7 +2444,7 @@ int vfs_link(struct dentry *old_dentry,
if (!inode)
return -ENOENT;
- error = may_create(dir, new_dentry, NULL);
+ error = may_create(dir, new_dentry, nd);
if (error)
return error;
@@ -2348,7 +2454,7 @@ int vfs_link(struct dentry *old_dentry,
/*
* A link to an append-only or immutable file cannot be created.
*/
- if (IS_APPEND(inode) || IS_IMMUTABLE(inode))
+ if (IS_APPEND(inode) || IS_IXORUNLINK(inode))
return -EPERM;
if (!dir->i_op || !dir->i_op->link)
return -EPERM;
@@ -2408,7 +2514,8 @@ asmlinkage long sys_linkat(int olddfd, c
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
goto out_unlock;
- error = vfs_link(old_nd.path.dentry, nd.path.dentry->d_inode, new_dentry);
+ error = vfs_link(old_nd.path.dentry, nd.path.dentry->d_inode,
+ new_dentry, &nd);
dput(new_dentry);
out_unlock:
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
@@ -2540,14 +2647,14 @@ int vfs_rename(struct inode *old_dir, st
if (old_dentry->d_inode == new_dentry->d_inode)
return 0;
- error = may_delete(old_dir, old_dentry, is_dir);
+ error = may_delete(old_dir, old_dentry, is_dir, NULL);
if (error)
return error;
if (!new_dentry->d_inode)
error = may_create(new_dir, new_dentry, NULL);
else
- error = may_delete(new_dir, new_dentry, is_dir);
+ error = may_delete(new_dir, new_dentry, is_dir, NULL);
if (error)
return error;
@@ -2625,6 +2732,9 @@ static int do_rename(int olddfd, const c
error = -EINVAL;
if (old_dentry == trap)
goto exit4;
+ error = -EROFS;
+ if (MNT_IS_RDONLY(newnd.path.mnt))
+ goto exit4;
new_dentry = lookup_hash(&newnd);
error = PTR_ERR(new_dentry);
if (IS_ERR(new_dentry))
@@ -2718,6 +2828,214 @@ int vfs_follow_link(struct nameidata *nd
return __vfs_follow_link(nd, link);
}
+
+#ifdef CONFIG_VSERVER_COWBL
+
+#include <linux/file.h>
+
+static inline
+long do_cow_splice(struct file *in, struct file *out, size_t len)
+{
+ loff_t ppos = 0;
+
+ return do_splice_direct(in, &ppos, out, len, 0);
+}
+
+struct dentry *cow_break_link(const char *pathname)
+{
+ int ret, mode, pathlen, redo = 0;
+ struct nameidata old_nd, dir_nd;
+ struct path old_path, new_path;
+ struct dentry *dir, *res = NULL;
+ struct file *old_file;
+ struct file *new_file;
+ char *to, *path, pad='\251';
+ loff_t size;
+
+ vxdprintk(VXD_CBIT(misc, 1), "cow_break_link(<28>%s<>)", pathname);
+ path = kmalloc(PATH_MAX, GFP_KERNEL);
+ ret = -ENOMEM;
+ if (!path)
+ goto out;
+
+ /* old_nd will have refs to dentry and mnt */
+ ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
+ vxdprintk(VXD_CBIT(misc, 2), "path_lookup(old): %d", ret);
+ if (ret < 0)
+ goto out_free_path;
+
+ old_path = old_nd.path;
+ mode = old_path.dentry->d_inode->i_mode;
+
+ to = d_path(&old_path, path, PATH_MAX-2);
+ pathlen = strlen(to);
+ vxdprintk(VXD_CBIT(misc, 2), "old path <20>%s<> [<5B>%.*s<>:%d]", to,
+ old_path.dentry->d_name.len, old_path.dentry->d_name.name,
+ old_path.dentry->d_name.len);
+
+ to[pathlen + 1] = 0;
+retry:
+ to[pathlen] = pad--;
+ ret = -EMLINK;
+ if (pad <= '\240')
+ goto out_rel_old;
+
+ vxdprintk(VXD_CBIT(misc, 1), "temp copy <20>%s<>", to);
+ /* dir_nd will have refs to dentry and mnt */
+ ret = path_lookup(to,
+ LOOKUP_PARENT | LOOKUP_OPEN | LOOKUP_CREATE, &dir_nd);
+ vxdprintk(VXD_CBIT(misc, 2),
+ "path_lookup(new): %d", ret);
+ if (ret < 0)
+ goto retry;
+
+ /* this puppy downs the inode mutex */
+ new_path.dentry = lookup_create(&dir_nd, 0);
+ vxdprintk(VXD_CBIT(misc, 2),
+ "lookup_create(new): %p [<5B>%.*s<>:%d]", new_path.dentry,
+ new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+ new_path.dentry->d_name.len);
+ if (!new_path.dentry || IS_ERR(new_path.dentry)) {
+ path_put(&dir_nd.path);
+ goto retry;
+ }
+ dir = dir_nd.path.dentry;
+
+ ret = vfs_create(dir_nd.path.dentry->d_inode, new_path.dentry, mode, &dir_nd);
+ vxdprintk(VXD_CBIT(misc, 2),
+ "vfs_create(new): %d", ret);
+ if (ret == -EEXIST) {
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(new_path.dentry);
+ path_put(&dir_nd.path);
+ goto retry;
+ }
+ else if (ret < 0)
+ goto out_unlock_new;
+
+ /* drop out early, ret passes ENOENT */
+ ret = -ENOENT;
+ if ((redo = d_unhashed(old_path.dentry)))
+ goto out_unlock_new;
+
+ new_path.mnt = dir_nd.path.mnt;
+ dget(old_path.dentry);
+ mntget(old_path.mnt);
+ /* this one cleans up the dentry/mnt in case of failure */
+ old_file = dentry_open(old_path.dentry, old_path.mnt, O_RDONLY);
+ vxdprintk(VXD_CBIT(misc, 2),
+ "dentry_open(old): %p", old_file);
+ if (!old_file || IS_ERR(old_file)) {
+ res = IS_ERR(old_file) ? (void *) old_file : res;
+ goto out_unlock_new;
+ }
+
+ dget(new_path.dentry);
+ mntget(new_path.mnt);
+ /* this one cleans up the dentry/mnt in case of failure */
+ new_file = dentry_open(new_path.dentry, new_path.mnt, O_WRONLY);
+ vxdprintk(VXD_CBIT(misc, 2),
+ "dentry_open(new): %p", new_file);
+
+ ret = IS_ERR(new_file) ? PTR_ERR(new_file) : -ENOENT;
+ if (!new_file || IS_ERR(new_file))
+ goto out_fput_old;
+
+ size = i_size_read(old_file->f_dentry->d_inode);
+ ret = do_cow_splice(old_file, new_file, size);
+ vxdprintk(VXD_CBIT(misc, 2), "do_splice_direct: %d", ret);
+ if (ret < 0) {
+ goto out_fput_both;
+ } else if (ret < size) {
+ ret = -ENOSPC;
+ goto out_fput_both;
+ } else {
+ struct inode *old_inode = old_path.dentry->d_inode;
+ struct inode *new_inode = new_path.dentry->d_inode;
+ struct iattr attr = {
+ .ia_uid = old_inode->i_uid,
+ .ia_gid = old_inode->i_gid,
+ .ia_valid = ATTR_UID | ATTR_GID
+ };
+
+ ret = inode_setattr(new_inode, &attr);
+ if (ret)
+ goto out_fput_both;
+ }
+
+ mutex_lock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+ /* drop out late */
+ ret = -ENOENT;
+ if ((redo = d_unhashed(old_path.dentry)))
+ goto out_unlock;
+
+ vxdprintk(VXD_CBIT(misc, 2),
+ "vfs_rename: [<5B>%*s<>:%d] -> [<5B>%*s<>:%d]",
+ new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+ new_path.dentry->d_name.len,
+ old_path.dentry->d_name.len, old_path.dentry->d_name.name,
+ old_path.dentry->d_name.len);
+ ret = vfs_rename(dir_nd.path.dentry->d_inode, new_path.dentry,
+ old_nd.path.dentry->d_parent->d_inode, old_path.dentry);
+ vxdprintk(VXD_CBIT(misc, 2), "vfs_rename: %d", ret);
+ res = new_path.dentry;
+
+out_unlock:
+ mutex_unlock(&old_path.dentry->d_inode->i_sb->s_vfs_rename_mutex);
+
+out_fput_both:
+ vxdprintk(VXD_CBIT(misc, 3),
+ "fput(new_file=%p[#%d])", new_file,
+ atomic_read(&new_file->f_count));
+ fput(new_file);
+
+out_fput_old:
+ vxdprintk(VXD_CBIT(misc, 3),
+ "fput(old_file=%p[#%d])", old_file,
+ atomic_read(&old_file->f_count));
+ fput(old_file);
+
+out_unlock_new:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ if (!ret)
+ goto out_redo;
+
+ /* error path cleanup */
+ vfs_unlink(dir->d_inode, new_path.dentry, &dir_nd);
+ dput(new_path.dentry);
+
+out_redo:
+ if (!redo)
+ goto out_rel_both;
+ /* lookup dentry once again */
+ path_put(&old_nd.path);
+ ret = path_lookup(pathname, LOOKUP_FOLLOW, &old_nd);
+ if (ret)
+ goto out_rel_both;
+
+ new_path.dentry = old_nd.path.dentry;
+ vxdprintk(VXD_CBIT(misc, 2),
+ "path_lookup(redo): %p [<5B>%.*s<>:%d]", new_path.dentry,
+ new_path.dentry->d_name.len, new_path.dentry->d_name.name,
+ new_path.dentry->d_name.len);
+ dget(new_path.dentry);
+ res = new_path.dentry;
+
+out_rel_both:
+ path_put(&dir_nd.path);
+out_rel_old:
+ path_put(&old_nd.path);
+out_free_path:
+ kfree(path);
+out:
+ if (ret)
+ res = ERR_PTR(ret);
+ return res;
+}
+
+#endif
+
/* get the link contents into pagecache */
static char *page_getlink(struct dentry * dentry, struct page **ppage)
{
--- a/fs/namespace.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/namespace.c 2008-04-21 17:20:53.000000000 -0400
@@ -26,6 +26,11 @@
#include <linux/mount.h>
#include <linux/ramfs.h>
#include <linux/log2.h>
+#include <linux/vs_base.h>
+#include <linux/vs_context.h>
+#include <linux/vs_tag.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/global.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include "pnode.h"
@@ -244,6 +249,7 @@ static struct vfsmount *clone_mnt(struct
mnt->mnt_root = dget(root);
mnt->mnt_mountpoint = mnt->mnt_root;
mnt->mnt_parent = mnt;
+ mnt->mnt_tag = old->mnt_tag;
if (flag & CL_SLAVE) {
list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -323,6 +329,31 @@ static inline void mangle(struct seq_fil
seq_escape(m, s, " \t\n\\");
}
+static int mnt_is_reachable(struct vfsmount *mnt)
+{
+ struct path root;
+ struct dentry *point;
+ int ret;
+
+ if (mnt == mnt->mnt_ns->root)
+ return 1;
+
+ spin_lock(&vfsmount_lock);
+ root = current->fs->root;
+ point = root.dentry;
+
+ while ((mnt != mnt->mnt_parent) && (mnt != root.mnt)) {
+ point = mnt->mnt_mountpoint;
+ mnt = mnt->mnt_parent;
+ }
+
+ ret = (mnt == root.mnt) && is_subdir(point, root.dentry);
+
+ spin_unlock(&vfsmount_lock);
+
+ return ret;
+}
+
/*
* Simple .show_options callback for filesystems which don't want to
* implement more complex mount option showing.
@@ -388,44 +419,61 @@ static int show_vfsmnt(struct seq_file *
struct vfsmount *mnt = list_entry(v, struct vfsmount, mnt_list);
int err = 0;
static struct proc_fs_info {
- int flag;
- char *str;
+ int s_flag;
+ int mnt_flag;
+ char *set_str;
+ char *unset_str;
} fs_info[] = {
- { MS_SYNCHRONOUS, ",sync" },
- { MS_DIRSYNC, ",dirsync" },
- { MS_MANDLOCK, ",mand" },
- { 0, NULL }
- };
- static struct proc_fs_info mnt_info[] = {
- { MNT_NOSUID, ",nosuid" },
- { MNT_NODEV, ",nodev" },
- { MNT_NOEXEC, ",noexec" },
- { MNT_NOATIME, ",noatime" },
- { MNT_NODIRATIME, ",nodiratime" },
- { MNT_RELATIME, ",relatime" },
- { 0, NULL }
+ { MS_RDONLY, MNT_RDONLY, "ro", "rw" },
+ { MS_SYNCHRONOUS, 0, ",sync", NULL },
+ { MS_DIRSYNC, 0, ",dirsync", NULL },
+ { MS_MANDLOCK, 0, ",mand", NULL },
+ { MS_TAGGED, 0, ",tag", NULL },
+ { MS_NOATIME, MNT_NOATIME, ",noatime", NULL },
+ { MS_NODIRATIME, MNT_NODIRATIME, ",nodiratime", NULL },
+ { MS_RELATIME, MNT_RELATIME, ",relatime", NULL },
+ { 0, MNT_NOSUID, ",nosuid", NULL },
+ { 0, MNT_NODEV, ",nodev", NULL },
+ { 0, MNT_NOEXEC, ",noexec", NULL },
+ { 0, 0, NULL, NULL }
};
- struct proc_fs_info *fs_infop;
- struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ struct proc_fs_info *p;
+ unsigned long s_flags = mnt->mnt_sb->s_flags;
+ int mnt_flags = mnt->mnt_flags;
- mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
- seq_putc(m, ' ');
- seq_path(m, &mnt_path, " \t\n\\");
- seq_putc(m, ' ');
- mangle(m, mnt->mnt_sb->s_type->name);
- if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
- seq_putc(m, '.');
- mangle(m, mnt->mnt_sb->s_subtype);
- }
- seq_puts(m, mnt->mnt_sb->s_flags & MS_RDONLY ? " ro" : " rw");
- for (fs_infop = fs_info; fs_infop->flag; fs_infop++) {
- if (mnt->mnt_sb->s_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
+ if (vx_flags(VXF_HIDE_MOUNT, 0))
+ return 0;
+ if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+ return 0;
+
+ if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+ mnt == current->fs->root.mnt) {
+ seq_puts(m, "/dev/root / ");
+ } else {
+ struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
+ mangle(m, mnt->mnt_devname ? mnt->mnt_devname : "none");
+ seq_putc(m, ' ');
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
+
+ if (mnt->mnt_sb->s_subtype && mnt->mnt_sb->s_subtype[0]) {
+ seq_putc(m, '.');
+ mangle(m, mnt->mnt_sb->s_subtype);
+ }
}
- for (fs_infop = mnt_info; fs_infop->flag; fs_infop++) {
- if (mnt->mnt_flags & fs_infop->flag)
- seq_puts(m, fs_infop->str);
+ mangle(m, mnt->mnt_sb->s_type->name);
+ seq_putc(m, ' ');
+ for (p = fs_info; (p->s_flag | p->mnt_flag) ; p++) {
+ if ((s_flags & p->s_flag) || (mnt_flags & p->mnt_flag)) {
+ if (p->set_str)
+ seq_puts(m, p->set_str);
+ } else {
+ if (p->unset_str)
+ seq_puts(m, p->unset_str);
+ }
}
+ if (mnt->mnt_flags & MNT_TAGID)
+ seq_printf(m, ",tag=%d", mnt->mnt_tag);
if (mnt->mnt_sb->s_op->show_options)
err = mnt->mnt_sb->s_op->show_options(m, mnt);
seq_puts(m, " 0 0\n");
@@ -445,17 +493,27 @@ static int show_vfsstat(struct seq_file
struct path mnt_path = { .dentry = mnt->mnt_root, .mnt = mnt };
int err = 0;
- /* device */
- if (mnt->mnt_devname) {
- seq_puts(m, "device ");
- mangle(m, mnt->mnt_devname);
- } else
- seq_puts(m, "no device");
+ if (vx_flags(VXF_HIDE_MOUNT, 0))
+ return 0;
+ if (!mnt_is_reachable(mnt) && !vx_check(0, VS_WATCH_P))
+ return 0;
- /* mount point */
- seq_puts(m, " mounted on ");
- seq_path(m, &mnt_path, " \t\n\\");
- seq_putc(m, ' ');
+ if (!vx_check(0, VS_ADMIN|VS_WATCH) &&
+ mnt == current->fs->root.mnt) {
+ seq_puts(m, "device /dev/root mounted on / ");
+ } else {
+ /* device */
+ if (mnt->mnt_devname) {
+ seq_puts(m, "device ");
+ mangle(m, mnt->mnt_devname);
+ } else
+ seq_puts(m, "no device");
+
+ /* mount point */
+ seq_puts(m, " mounted on ");
+ seq_path(m, &mnt_path, " \t\n\\");
+ seq_putc(m, ' ');
+ }
/* file system type */
seq_puts(m, "with fstype ");
@@ -693,7 +751,7 @@ asmlinkage long sys_umount(char __user *
goto dput_and_out;
retval = -EPERM;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
goto dput_and_out;
retval = do_umount(nd.path.mnt, flags);
@@ -719,7 +777,7 @@ asmlinkage long sys_oldumount(char __use
static int mount_is_safe(struct nameidata *nd)
{
- if (capable(CAP_SYS_ADMIN))
+ if (vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
return 0;
return -EPERM;
#ifdef notyet
@@ -974,11 +1032,13 @@ static noinline int do_change_type(struc
* noinline this do_mount helper to save do_mount stack space.
*/
static noinline int do_loopback(struct nameidata *nd, char *old_name,
- int recurse)
+ tag_t tag, unsigned long flags, int mnt_flags)
{
struct nameidata old_nd;
struct vfsmount *mnt = NULL;
int err = mount_is_safe(nd);
+ int recurse = flags & MS_REC;
+
if (err)
return err;
if (!old_name || !*old_name)
@@ -1004,6 +1064,12 @@ static noinline int do_loopback(struct n
if (!mnt)
goto out;
+ mnt->mnt_flags = mnt_flags;
+ if (flags & MS_TAGID) {
+ mnt->mnt_tag = tag;
+ mnt->mnt_flags |= MNT_TAGID;
+ }
+
err = graft_tree(mnt, nd);
if (err) {
LIST_HEAD(umount_list);
@@ -1012,6 +1078,7 @@ static noinline int do_loopback(struct n
spin_unlock(&vfsmount_lock);
release_mounts(&umount_list);
}
+ mnt->mnt_flags = mnt_flags;
out:
up_write(&namespace_sem);
@@ -1026,12 +1093,12 @@ out:
* noinline this do_mount helper to save do_mount stack space.
*/
static noinline int do_remount(struct nameidata *nd, int flags, int mnt_flags,
- void *data)
+ void *data, xid_t xid)
{
int err;
struct super_block *sb = nd->path.mnt->mnt_sb;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_REMOUNT))
return -EPERM;
if (!check_mnt(nd->path.mnt))
@@ -1069,7 +1136,7 @@ static noinline int do_move_mount(struct
struct path parent_path;
struct vfsmount *p;
int err = 0;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
return -EPERM;
if (!old_name || !*old_name)
return -EINVAL;
@@ -1152,7 +1219,7 @@ static noinline int do_new_mount(struct
return -EINVAL;
/* we need capabilities... */
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SECURE_MOUNT))
return -EPERM;
mnt = do_kern_mount(type, flags, name, data);
@@ -1397,6 +1464,7 @@ long do_mount(char *dev_name, char *dir_
struct nameidata nd;
int retval = 0;
int mnt_flags = 0;
+ tag_t tag = 0;
/* Discard magic */
if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
@@ -1412,7 +1480,17 @@ long do_mount(char *dev_name, char *dir_
if (data_page)
((char *)data_page)[PAGE_SIZE - 1] = 0;
+ retval = dx_parse_tag(data_page, &tag, 1);
+ if (retval) {
+ mnt_flags |= retval;
+ /* FIXME: bind and re-mounts get the tag flag? */
+ if (flags & (MS_BIND|MS_REMOUNT))
+ flags |= MS_TAGID;
+ }
+
/* Separate the per-mountpoint flags */
+ if (flags & MS_RDONLY)
+ mnt_flags |= MNT_RDONLY;
if (flags & MS_NOSUID)
mnt_flags |= MNT_NOSUID;
if (flags & MS_NODEV)
@@ -1426,6 +1504,8 @@ long do_mount(char *dev_name, char *dir_
if (flags & MS_RELATIME)
mnt_flags |= MNT_RELATIME;
+ if (!capable(CAP_SYS_ADMIN))
+ mnt_flags |= MNT_NODEV;
flags &= ~(MS_NOSUID | MS_NOEXEC | MS_NODEV | MS_ACTIVE |
MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT);
@@ -1440,9 +1520,9 @@ long do_mount(char *dev_name, char *dir_
if (flags & MS_REMOUNT)
retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
- data_page);
+ data_page, tag);
else if (flags & MS_BIND)
- retval = do_loopback(&nd, dev_name, flags & MS_REC);
+ retval = do_loopback(&nd, dev_name, tag, flags, mnt_flags);
else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
retval = do_change_type(&nd, flags);
else if (flags & MS_MOVE)
@@ -1515,6 +1595,7 @@ static struct mnt_namespace *dup_mnt_ns(
q = next_mnt(q, new_ns->root);
}
up_write(&namespace_sem);
+ atomic_inc(&vs_global_mnt_ns);
if (rootmnt)
mntput(rootmnt);
@@ -1850,5 +1931,6 @@ void __put_mnt_ns(struct mnt_namespace *
spin_unlock(&vfsmount_lock);
up_write(&namespace_sem);
release_mounts(&umount_list);
+ atomic_dec(&vs_global_mnt_ns);
kfree(ns);
}
--- a/fs/nfs/client.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/client.c 2008-04-19 15:14:52.000000000 -0400
@@ -589,6 +589,9 @@ static int nfs_init_server_rpcclient(str
if (server->flags & NFS_MOUNT_SOFT)
server->client->cl_softrtry = 1;
+ server->client->cl_tag = 0;
+ if (server->flags & NFS_MOUNT_TAGGED)
+ server->client->cl_tag = 1;
return 0;
}
@@ -742,6 +745,10 @@ static void nfs_server_set_fsinfo(struct
server->acdirmin = server->acdirmax = 0;
}
+ /* FIXME: needs fsinfo
+ if (server->flags & NFS_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED; */
+
server->maxfilesize = fsinfo->maxfilesize;
/* We're airborne Set socket buffersize */
--- a/fs/nfs/dir.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/dir.c 2008-04-21 16:52:03.000000000 -0400
@@ -34,6 +34,7 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/vs_tag.h>
#include "nfs4_fs.h"
#include "delegation.h"
@@ -930,6 +931,7 @@ static struct dentry *nfs_lookup(struct
if (IS_ERR(res))
goto out_unblock_sillyrename;
+ dx_propagate_tag(nd, inode);
no_entry:
res = d_materialise_unique(dentry, inode);
if (res != NULL) {
@@ -967,7 +969,8 @@ static int is_atomic_open(struct inode *
if (nd->flags & LOOKUP_DIRECTORY)
return 0;
/* Are we trying to write to a read only partition? */
- if (IS_RDONLY(dir) && (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
+ if ((IS_RDONLY(dir) || MNT_IS_RDONLY(nd->path.mnt)) &&
+ (nd->intent.open.flags & (O_CREAT|O_TRUNC|FMODE_WRITE)))
return 0;
return 1;
}
--- a/fs/nfs/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -37,6 +37,7 @@
#include <linux/vfs.h>
#include <linux/inet.h>
#include <linux/nfs_xdr.h>
+#include <linux/vs_tag.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -316,8 +317,10 @@ nfs_fhget(struct super_block *sb, struct
nfsi->change_attr = fattr->change_attr;
inode->i_size = nfs_size_to_loff_t(fattr->size);
inode->i_nlink = fattr->nlink;
- inode->i_uid = fattr->uid;
- inode->i_gid = fattr->gid;
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
+ /* maybe fattr->xid someday */
if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
/*
* report the blocks in 512byte units
@@ -410,6 +413,8 @@ void nfs_setattr_update_inode(struct ino
inode->i_uid = attr->ia_uid;
if ((attr->ia_valid & ATTR_GID) != 0)
inode->i_gid = attr->ia_gid;
+ if ((attr->ia_valid & ATTR_TAG) && IS_TAGGED(inode))
+ inode->i_tag = attr->ia_tag;
spin_lock(&inode->i_lock);
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
spin_unlock(&inode->i_lock);
@@ -841,6 +846,9 @@ static int nfs_check_inode_attributes(st
struct nfs_inode *nfsi = NFS_I(inode);
loff_t cur_size, new_isize;
unsigned long invalid = 0;
+ uid_t uid;
+ gid_t gid;
+ tag_t tag;
/* Has the inode gone and changed behind our back? */
@@ -865,10 +873,15 @@ static int nfs_check_inode_attributes(st
if (cur_size != new_isize && nfsi->npages == 0)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
+ uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
+ gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
+ tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
+
/* Have any file permissions changed? */
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)
- || inode->i_uid != fattr->uid
- || inode->i_gid != fattr->gid)
+ || inode->i_uid != uid
+ || inode->i_gid != gid
+ || inode->i_tag != tag)
invalid |= NFS_INO_INVALID_ATTR | NFS_INO_INVALID_ACCESS | NFS_INO_INVALID_ACL;
/* Has the link count changed? */
@@ -989,6 +1002,9 @@ static int nfs_update_inode(struct inode
loff_t cur_isize, new_isize;
unsigned long invalid = 0;
unsigned long now = jiffies;
+ uid_t uid;
+ gid_t gid;
+ tag_t tag;
dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
__FUNCTION__, inode->i_sb->s_id, inode->i_ino,
@@ -1062,15 +1078,21 @@ static int nfs_update_inode(struct inode
memcpy(&inode->i_atime, &fattr->atime, sizeof(inode->i_atime));
nfsi->change_attr = fattr->change_attr;
+ uid = INOTAG_UID(DX_TAG(inode), fattr->uid, fattr->gid);
+ gid = INOTAG_GID(DX_TAG(inode), fattr->uid, fattr->gid);
+ tag = INOTAG_TAG(DX_TAG(inode), fattr->uid, fattr->gid, 0);
+
if ((inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO) ||
- inode->i_uid != fattr->uid ||
- inode->i_gid != fattr->gid)
+ inode->i_uid != uid ||
+ inode->i_gid != gid ||
+ inode->i_tag != tag)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
inode->i_mode = fattr->mode;
inode->i_nlink = fattr->nlink;
- inode->i_uid = fattr->uid;
- inode->i_gid = fattr->gid;
+ inode->i_uid = uid;
+ inode->i_gid = gid;
+ inode->i_tag = tag;
if (fattr->valid & (NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4)) {
/*
--- a/fs/nfs/nfs3xdr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/nfs3xdr.c 2008-04-19 15:14:52.000000000 -0400
@@ -22,6 +22,7 @@
#include <linux/nfs3.h>
#include <linux/nfs_fs.h>
#include <linux/nfsacl.h>
+#include <linux/vs_tag.h>
#include "internal.h"
#define NFSDBG_FACILITY NFSDBG_XDR
@@ -180,7 +181,7 @@ xdr_decode_fattr(__be32 *p, struct nfs_f
}
static inline __be32 *
-xdr_encode_sattr(__be32 *p, struct iattr *attr)
+xdr_encode_sattr(__be32 *p, struct iattr *attr, int tag)
{
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
@@ -188,15 +189,17 @@ xdr_encode_sattr(__be32 *p, struct iattr
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_UID) {
+ if (attr->ia_valid & ATTR_UID ||
+ (tag && (attr->ia_valid & ATTR_TAG))) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_uid);
+ *p++ = htonl(TAGINO_UID(tag, attr->ia_uid, attr->ia_tag));
} else {
*p++ = xdr_zero;
}
- if (attr->ia_valid & ATTR_GID) {
+ if (attr->ia_valid & ATTR_GID ||
+ (tag && (attr->ia_valid & ATTR_TAG))) {
*p++ = xdr_one;
- *p++ = htonl(attr->ia_gid);
+ *p++ = htonl(TAGINO_GID(tag, attr->ia_gid, attr->ia_tag));
} else {
*p++ = xdr_zero;
}
@@ -281,7 +284,8 @@ static int
nfs3_xdr_sattrargs(struct rpc_rqst *req, __be32 *p, struct nfs3_sattrargs *args)
{
p = xdr_encode_fhandle(p, args->fh);
- p = xdr_encode_sattr(p, args->sattr);
+ p = xdr_encode_sattr(p, args->sattr,
+ req->rq_task->tk_client->cl_tag);
*p++ = htonl(args->guard);
if (args->guard)
p = xdr_encode_time3(p, &args->guardtime);
@@ -386,7 +390,8 @@ nfs3_xdr_createargs(struct rpc_rqst *req
*p++ = args->verifier[0];
*p++ = args->verifier[1];
} else
- p = xdr_encode_sattr(p, args->sattr);
+ p = xdr_encode_sattr(p, args->sattr,
+ req->rq_task->tk_client->cl_tag);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
@@ -400,7 +405,8 @@ nfs3_xdr_mkdirargs(struct rpc_rqst *req,
{
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
- p = xdr_encode_sattr(p, args->sattr);
+ p = xdr_encode_sattr(p, args->sattr,
+ req->rq_task->tk_client->cl_tag);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
return 0;
}
@@ -413,7 +419,8 @@ nfs3_xdr_symlinkargs(struct rpc_rqst *re
{
p = xdr_encode_fhandle(p, args->fromfh);
p = xdr_encode_array(p, args->fromname, args->fromlen);
- p = xdr_encode_sattr(p, args->sattr);
+ p = xdr_encode_sattr(p, args->sattr,
+ req->rq_task->tk_client->cl_tag);
*p++ = htonl(args->pathlen);
req->rq_slen = xdr_adjust_iovec(req->rq_svec, p);
@@ -431,7 +438,8 @@ nfs3_xdr_mknodargs(struct rpc_rqst *req,
p = xdr_encode_fhandle(p, args->fh);
p = xdr_encode_array(p, args->name, args->len);
*p++ = htonl(args->type);
- p = xdr_encode_sattr(p, args->sattr);
+ p = xdr_encode_sattr(p, args->sattr,
+ req->rq_task->tk_client->cl_tag);
if (args->type == NF3CHR || args->type == NF3BLK) {
*p++ = htonl(MAJOR(args->rdev));
*p++ = htonl(MINOR(args->rdev));
--- a/fs/nfs/nfsroot.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/nfsroot.c 2008-04-19 15:14:52.000000000 -0400
@@ -119,12 +119,12 @@ static int mount_port __initdata = 0; /
enum {
/* Options that take integer arguments */
Opt_port, Opt_rsize, Opt_wsize, Opt_timeo, Opt_retrans, Opt_acregmin,
- Opt_acregmax, Opt_acdirmin, Opt_acdirmax,
+ Opt_acregmax, Opt_acdirmin, Opt_acdirmax, Opt_tagid,
/* Options that take no arguments */
Opt_soft, Opt_hard, Opt_intr,
Opt_nointr, Opt_posix, Opt_noposix, Opt_cto, Opt_nocto, Opt_ac,
Opt_noac, Opt_lock, Opt_nolock, Opt_v2, Opt_v3, Opt_udp, Opt_tcp,
- Opt_acl, Opt_noacl,
+ Opt_acl, Opt_noacl, Opt_tag, Opt_notag,
/* Error token */
Opt_err
};
@@ -161,6 +161,9 @@ static match_table_t __initdata tokens =
{Opt_tcp, "tcp"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
{Opt_err, NULL}
};
@@ -272,6 +275,20 @@ static int __init root_nfs_parse(char *n
case Opt_noacl:
nfs_data.flags |= NFS_MOUNT_NOACL;
break;
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ nfs_data.flags |= NFS_MOUNT_TAGGED;
+ break;
+ case Opt_notag:
+ nfs_data.flags &= ~NFS_MOUNT_TAGGED;
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ nfs_data.flags |= NFS_MOUNT_TAGGED;
+ break;
+#endif
default:
printk(KERN_WARNING "Root-NFS: unknown "
"option: %s\n", p);
--- a/fs/nfs/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfs/super.c 2008-04-19 15:14:52.000000000 -0400
@@ -50,6 +50,7 @@
#include <linux/nfs_xdr.h>
#include <linux/magic.h>
#include <linux/parser.h>
+#include <linux/vs_tag.h>
#include <asm/system.h>
#include <asm/uaccess.h>
@@ -458,6 +459,7 @@ static void nfs_show_mount_options(struc
{ NFS_MOUNT_NOACL, ",noacl", "" },
{ NFS_MOUNT_NORDIRPLUS, ",nordirplus", "" },
{ NFS_MOUNT_UNSHARED, ",nosharecache", ""},
+ { NFS_MOUNT_TAGGED, ",tag", "" },
{ 0, NULL, NULL }
};
const struct proc_nfs_info *nfs_infop;
--- a/fs/nfsd/auth.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/auth.c 2008-04-19 17:07:41.000000000 -0400
@@ -10,6 +10,7 @@
#include <linux/sunrpc/svcauth.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/export.h>
+#include <linux/vs_tag.h>
int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
{
@@ -54,19 +55,23 @@ int nfsd_setuser(struct svc_rqst *rqstp,
get_group_info(cred.cr_group_info);
if (cred.cr_uid != (uid_t) -1)
- current->fsuid = cred.cr_uid;
+ current->fsuid = INOTAG_UID(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid);
else
current->fsuid = exp->ex_anon_uid;
if (cred.cr_gid != (gid_t) -1)
- current->fsgid = cred.cr_gid;
+ current->fsgid = INOTAG_GID(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid);
else
current->fsgid = exp->ex_anon_gid;
+ /* this desperately needs a tag :) */
+ current->xid = (xid_t)INOTAG_TAG(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid, 0);
+
if (!cred.cr_group_info)
return -ENOMEM;
ret = set_current_groups(cred.cr_group_info);
put_group_info(cred.cr_group_info);
- if ((cred.cr_uid)) {
+
+ if (INOTAG_UID(DX_TAG_NFSD, cred.cr_uid, cred.cr_gid)) {
current->cap_effective =
cap_drop_nfsd_set(current->cap_effective);
} else {
--- a/fs/nfsd/nfs3xdr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/nfs3xdr.c 2008-04-19 17:08:09.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/xdr3.h>
+#include <linux/vs_tag.h>
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -108,6 +109,8 @@ static __be32 *
decode_sattr3(__be32 *p, struct iattr *iap)
{
u32 tmp;
+ uid_t uid = 0;
+ gid_t gid = 0;
iap->ia_valid = 0;
@@ -117,12 +120,15 @@ decode_sattr3(__be32 *p, struct iattr *i
}
if (*p++) {
iap->ia_valid |= ATTR_UID;
- iap->ia_uid = ntohl(*p++);
+ uid = ntohl(*p++);
}
if (*p++) {
iap->ia_valid |= ATTR_GID;
- iap->ia_gid = ntohl(*p++);
+ gid = ntohl(*p++);
}
+ iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
+ iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
+ iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
if (*p++) {
u64 newsize;
@@ -178,8 +184,12 @@ encode_fattr3(struct svc_rqst *rqstp, __
*p++ = htonl(nfs3_ftypes[(stat->mode & S_IFMT) >> 12]);
*p++ = htonl((u32) stat->mode);
*p++ = htonl((u32) stat->nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+ *p++ = htonl((u32) nfsd_ruid(rqstp,
+ TAGINO_UID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+ stat->uid, stat->tag)));
+ *p++ = htonl((u32) nfsd_rgid(rqstp,
+ TAGINO_GID(0 /* FIXME: DX_TAG(dentry->d_inode) */,
+ stat->gid, stat->tag)));
if (S_ISLNK(stat->mode) && stat->size > NFS3_MAXPATHLEN) {
p = xdr_encode_hyper(p, (u64) NFS3_MAXPATHLEN);
} else {
--- a/fs/nfsd/nfs4recover.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/nfs4recover.c 2008-04-20 13:24:11.000000000 -0400
@@ -154,7 +154,7 @@ nfsd4_create_clid_dir(struct nfs4_client
dprintk("NFSD: nfsd4_create_clid_dir: DIRECTORY EXISTS\n");
goto out_put;
}
- status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
+ status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU, NULL);
out_put:
dput(dentry);
out_unlock:
@@ -258,7 +258,7 @@ nfsd4_remove_clid_file(struct dentry *di
return -EINVAL;
}
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- status = vfs_unlink(dir->d_inode, dentry);
+ status = vfs_unlink(dir->d_inode, dentry, NULL);
mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
@@ -273,7 +273,7 @@ nfsd4_clear_clid_dir(struct dentry *dir,
* a kernel from the future.... */
nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
- status = vfs_rmdir(dir->d_inode, dentry);
+ status = vfs_rmdir(dir->d_inode, dentry, NULL);
mutex_unlock(&dir->d_inode->i_mutex);
return status;
}
--- a/fs/nfsd/nfs4xdr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/nfs4xdr.c 2008-04-19 15:14:52.000000000 -0400
@@ -58,6 +58,7 @@
#include <linux/nfs4_acl.h>
#include <linux/sunrpc/gss_api.h>
#include <linux/sunrpc/svcauth_gss.h>
+#include <linux/vs_tag.h>
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -1759,14 +1760,18 @@ out_acl:
WRITE32(stat.nlink);
}
if (bmval1 & FATTR4_WORD1_OWNER) {
- status = nfsd4_encode_user(rqstp, stat.uid, &p, &buflen);
+ status = nfsd4_encode_user(rqstp,
+ TAGINO_UID(DX_TAG(dentry->d_inode),
+ stat.uid, stat.tag), &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
goto out;
}
if (bmval1 & FATTR4_WORD1_OWNER_GROUP) {
- status = nfsd4_encode_group(rqstp, stat.gid, &p, &buflen);
+ status = nfsd4_encode_group(rqstp,
+ TAGINO_GID(DX_TAG(dentry->d_inode),
+ stat.gid, stat.tag), &p, &buflen);
if (status == nfserr_resource)
goto out_resource;
if (status)
--- a/fs/nfsd/nfsxdr.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/nfsxdr.c 2008-04-20 13:23:36.000000000 -0400
@@ -15,6 +15,7 @@
#include <linux/nfsd/nfsd.h>
#include <linux/nfsd/xdr.h>
#include <linux/mm.h>
+#include <linux/vs_tag.h>
#include "auth.h"
#define NFSDDBG_FACILITY NFSDDBG_XDR
@@ -98,6 +99,8 @@ static __be32 *
decode_sattr(__be32 *p, struct iattr *iap)
{
u32 tmp, tmp1;
+ uid_t uid = 0;
+ gid_t gid = 0;
iap->ia_valid = 0;
@@ -111,12 +114,15 @@ decode_sattr(__be32 *p, struct iattr *ia
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_UID;
- iap->ia_uid = tmp;
+ uid = tmp;
}
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_GID;
- iap->ia_gid = tmp;
+ gid = tmp;
}
+ iap->ia_uid = INOTAG_UID(DX_TAG_NFSD, uid, gid);
+ iap->ia_gid = INOTAG_GID(DX_TAG_NFSD, uid, gid);
+ iap->ia_tag = INOTAG_TAG(DX_TAG_NFSD, uid, gid, 0);
if ((tmp = ntohl(*p++)) != (u32)-1) {
iap->ia_valid |= ATTR_SIZE;
iap->ia_size = tmp;
@@ -161,8 +167,10 @@ encode_fattr(struct svc_rqst *rqstp, __b
*p++ = htonl(nfs_ftypes[type >> 12]);
*p++ = htonl((u32) stat->mode);
*p++ = htonl((u32) stat->nlink);
- *p++ = htonl((u32) nfsd_ruid(rqstp, stat->uid));
- *p++ = htonl((u32) nfsd_rgid(rqstp, stat->gid));
+ *p++ = htonl((u32) nfsd_ruid(rqstp,
+ TAGINO_UID(DX_TAG(dentry->d_inode), stat->uid, stat->tag)));
+ *p++ = htonl((u32) nfsd_rgid(rqstp,
+ TAGINO_GID(DX_TAG(dentry->d_inode), stat->gid, stat->tag)));
if (S_ISLNK(type) && stat->size > NFS_MAXPATHLEN) {
*p++ = htonl(NFS_MAXPATHLEN);
--- a/fs/nfsd/vfs.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/nfsd/vfs.c 2008-04-21 17:24:34.000000000 -0400
@@ -1258,13 +1258,13 @@ nfsd_create(struct svc_rqst *rqstp, stru
host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
break;
case S_IFDIR:
- host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
+ host_err = vfs_mkdir(dirp, dchild, iap->ia_mode, NULL);
break;
case S_IFCHR:
case S_IFBLK:
case S_IFIFO:
case S_IFSOCK:
- host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
+ host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev, NULL);
break;
default:
printk("nfsd: bad file type %o in nfsd_create\n", type);
@@ -1529,11 +1529,13 @@ nfsd_symlink(struct svc_rqst *rqstp, str
else {
strncpy(path_alloced, path, plen);
path_alloced[plen] = 0;
- host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced, mode);
+ host_err = vfs_symlink(dentry->d_inode, dnew,
+ path_alloced, mode, NULL);
kfree(path_alloced);
}
} else
- host_err = vfs_symlink(dentry->d_inode, dnew, path, mode);
+ host_err = vfs_symlink(dentry->d_inode, dnew,
+ path, mode, NULL);
if (!host_err) {
if (EX_ISSYNC(fhp->fh_export))
@@ -1592,7 +1594,7 @@ nfsd_link(struct svc_rqst *rqstp, struct
dold = tfhp->fh_dentry;
dest = dold->d_inode;
- host_err = vfs_link(dold, dirp, dnew);
+ host_err = vfs_link(dold, dirp, dnew, NULL);
if (!host_err) {
if (EX_ISSYNC(ffhp->fh_export)) {
err = nfserrno(nfsd_sync_dir(ddir));
@@ -1757,9 +1759,9 @@ nfsd_unlink(struct svc_rqst *rqstp, stru
host_err = -EPERM;
} else
#endif
- host_err = vfs_unlink(dirp, rdentry);
+ host_err = vfs_unlink(dirp, rdentry, NULL);
} else { /* It's RMDIR */
- host_err = vfs_rmdir(dirp, rdentry);
+ host_err = vfs_rmdir(dirp, rdentry, NULL);
}
dput(rdentry);
@@ -1876,7 +1878,8 @@ nfsd_permission(struct svc_rqst *rqstp,
*/
if (!(acc & MAY_LOCAL_ACCESS))
if (acc & (MAY_WRITE | MAY_SATTR | MAY_TRUNC)) {
- if (exp_rdonly(rqstp, exp) || IS_RDONLY(inode))
+ if (exp_rdonly(rqstp, exp) || IS_RDONLY(inode)
+ || MNT_IS_RDONLY(exp->ex_path.mnt))
return nfserr_rofs;
if (/* (acc & MAY_WRITE) && */ IS_IMMUTABLE(inode))
return nfserr_perm;
--- a/fs/ocfs2/dlm/dlmfs.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/dlm/dlmfs.c 2008-04-19 15:14:52.000000000 -0400
@@ -43,6 +43,7 @@
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
+#include <linux/vs_tag.h>
#include <asm/uaccess.h>
@@ -342,6 +343,7 @@ static struct inode *dlmfs_get_root_inod
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
+ inode->i_tag = dx_current_fstag(sb);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
@@ -368,6 +370,7 @@ static struct inode *dlmfs_get_inode(str
inode->i_mode = mode;
inode->i_uid = current->fsuid;
inode->i_gid = current->fsgid;
+ inode->i_tag = dx_current_fstag(sb);
inode->i_blocks = 0;
inode->i_mapping->backing_dev_info = &dlmfs_backing_dev_info;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
--- a/fs/ocfs2/dlmglue.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/dlmglue.c 2008-04-19 15:14:52.000000000 -0400
@@ -1665,6 +1665,7 @@ static void __ocfs2_stuff_meta_lvb(struc
lvb->lvb_iclusters = cpu_to_be32(oi->ip_clusters);
lvb->lvb_iuid = cpu_to_be32(inode->i_uid);
lvb->lvb_igid = cpu_to_be32(inode->i_gid);
+ lvb->lvb_itag = cpu_to_be16(inode->i_tag);
lvb->lvb_imode = cpu_to_be16(inode->i_mode);
lvb->lvb_inlink = cpu_to_be16(inode->i_nlink);
lvb->lvb_iatime_packed =
@@ -1719,6 +1720,7 @@ static void ocfs2_refresh_inode_from_lvb
inode->i_uid = be32_to_cpu(lvb->lvb_iuid);
inode->i_gid = be32_to_cpu(lvb->lvb_igid);
+ inode->i_tag = be16_to_cpu(lvb->lvb_itag);
inode->i_mode = be16_to_cpu(lvb->lvb_imode);
inode->i_nlink = be16_to_cpu(lvb->lvb_inlink);
ocfs2_unpack_timespec(&inode->i_atime,
--- a/fs/ocfs2/dlmglue.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/dlmglue.h 2008-04-19 15:14:52.000000000 -0400
@@ -46,7 +46,8 @@ struct ocfs2_meta_lvb {
__be16 lvb_inlink;
__be32 lvb_iattr;
__be32 lvb_igeneration;
- __be32 lvb_reserved2;
+ __be16 lvb_itag;
+ __be16 lvb_reserved2;
};
/* ocfs2_inode_lock_full() 'arg_flags' flags */
--- a/fs/ocfs2/file.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -1054,13 +1054,15 @@ int ocfs2_setattr(struct dentry *dentry,
mlog(0, "uid change: %d\n", attr->ia_uid);
if (attr->ia_valid & ATTR_GID)
mlog(0, "gid change: %d\n", attr->ia_gid);
+ if (attr->ia_valid & ATTR_TAG)
+ mlog(0, "tag change: %d\n", attr->ia_tag);
if (attr->ia_valid & ATTR_SIZE)
mlog(0, "size change...\n");
if (attr->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME))
mlog(0, "time change...\n");
#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
- | ATTR_GID | ATTR_UID | ATTR_MODE)
+ | ATTR_GID | ATTR_UID | ATTR_TAG | ATTR_MODE)
if (!(attr->ia_valid & OCFS2_VALID_ATTRS)) {
mlog(0, "can't handle attrs: 0x%x\n", attr->ia_valid);
return 0;
@@ -2229,6 +2231,7 @@ const struct inode_operations ocfs2_file
const struct inode_operations ocfs2_special_file_iops = {
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
+ .sync_flags = ocfs2_sync_flags,
.permission = ocfs2_permission,
};
--- a/fs/ocfs2/inode.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/inode.c 2008-04-21 10:16:08.000000000 -0400
@@ -28,6 +28,7 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
+#include <linux/vs_tag.h>
#include <asm/byteorder.h>
@@ -42,6 +43,7 @@
#include "file.h"
#include "heartbeat.h"
#include "inode.h"
+#include "ioctl.h"
#include "journal.h"
#include "namei.h"
#include "suballoc.h"
@@ -79,6 +81,10 @@ void ocfs2_set_inode_flags(struct inode
if (flags & OCFS2_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
+ if (flags & OCFS2_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ if (flags & OCFS2_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
if (flags & OCFS2_SYNC_FL)
inode->i_flags |= S_SYNC;
@@ -109,6 +115,27 @@ void ocfs2_get_inode_flags(struct ocfs2_
oi->ip_attr |= OCFS2_DIRSYNC_FL;
}
+int ocfs2_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+
+ oldflags = OCFS2_I(inode)->ip_flags;
+ newflags = oldflags & ~(OCFS2_IMMUTABLE_FL |
+ OCFS2_IUNLINK_FL | OCFS2_BARRIER_FL);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= OCFS2_IMMUTABLE_FL;
+ if (IS_IUNLINK(inode))
+ newflags |= OCFS2_IUNLINK_FL;
+ if (IS_BARRIER(inode))
+ newflags |= OCFS2_BARRIER_FL;
+
+ if (oldflags ^ newflags)
+ return ocfs2_set_inode_attr(inode,
+ newflags, OCFS2_FL_MASK);
+ return 0;
+}
+
struct inode *ocfs2_iget(struct ocfs2_super *osb, u64 blkno, unsigned flags,
int sysfile_type)
{
@@ -219,6 +246,8 @@ int ocfs2_populate_inode(struct inode *i
struct super_block *sb;
struct ocfs2_super *osb;
int status = -EINVAL;
+ uid_t uid;
+ gid_t gid;
mlog_entry("(0x%p, size:%llu)\n", inode,
(unsigned long long)le64_to_cpu(fe->i_size));
@@ -254,8 +283,12 @@ int ocfs2_populate_inode(struct inode *i
inode->i_generation = le32_to_cpu(fe->i_generation);
inode->i_rdev = huge_decode_dev(le64_to_cpu(fe->id1.dev1.i_rdev));
inode->i_mode = le16_to_cpu(fe->i_mode);
- inode->i_uid = le32_to_cpu(fe->i_uid);
- inode->i_gid = le32_to_cpu(fe->i_gid);
+ uid = le32_to_cpu(fe->i_uid);
+ gid = le32_to_cpu(fe->i_gid);
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+ /* le16_to_cpu(raw_inode->i_raw_tag)i */ 0);
/* Fast symlinks will have i_size but no allocated clusters. */
if (S_ISLNK(inode->i_mode) && !fe->i_clusters)
@@ -1230,8 +1263,11 @@ int ocfs2_mark_inode_dirty(handle_t *han
fe->i_size = cpu_to_le64(i_size_read(inode));
fe->i_links_count = cpu_to_le16(inode->i_nlink);
- fe->i_uid = cpu_to_le32(inode->i_uid);
- fe->i_gid = cpu_to_le32(inode->i_gid);
+ fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode),
+ inode->i_uid, inode->i_tag));
+ fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode),
+ inode->i_gid, inode->i_tag));
+ /* i_tag = = cpu_to_le16(inode->i_tag); */
fe->i_mode = cpu_to_le16(inode->i_mode);
fe->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
fe->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
@@ -1259,16 +1295,25 @@ leave:
void ocfs2_refresh_inode(struct inode *inode,
struct ocfs2_dinode *fe)
{
+ uid_t uid;
+ gid_t gid;
+
spin_lock(&OCFS2_I(inode)->ip_lock);
OCFS2_I(inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
OCFS2_I(inode)->ip_attr = le32_to_cpu(fe->i_attr);
+ /* OCFS2_I(inode)->ip_flags &= ~OCFS2_FL_MASK;
+ OCFS2_I(inode)->ip_flags |= le32_to_cpu(fe->i_flags) & OCFS2_FL_MASK; */
OCFS2_I(inode)->ip_dyn_features = le16_to_cpu(fe->i_dyn_features);
ocfs2_set_inode_flags(inode);
i_size_write(inode, le64_to_cpu(fe->i_size));
inode->i_nlink = le16_to_cpu(fe->i_links_count);
- inode->i_uid = le32_to_cpu(fe->i_uid);
- inode->i_gid = le32_to_cpu(fe->i_gid);
+ uid = le32_to_cpu(fe->i_uid);
+ gid = le32_to_cpu(fe->i_gid);
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid,
+ /* le16_to_cpu(raw_inode->i_raw_tag)i */ 0);
inode->i_mode = le16_to_cpu(fe->i_mode);
if (S_ISLNK(inode->i_mode) && le32_to_cpu(fe->i_clusters) == 0)
inode->i_blocks = 0;
--- a/fs/ocfs2/inode.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/inode.h 2008-04-19 15:14:52.000000000 -0400
@@ -143,6 +143,7 @@ int ocfs2_aio_write(struct file *file, s
void ocfs2_set_inode_flags(struct inode *inode);
void ocfs2_get_inode_flags(struct ocfs2_inode_info *oi);
+int ocfs2_sync_flags(struct inode *inode);
static inline blkcnt_t ocfs2_inode_sector_count(struct inode *inode)
{
--- a/fs/ocfs2/ioctl.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -41,7 +41,7 @@ static int ocfs2_get_inode_attr(struct i
return status;
}
-static int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
unsigned mask)
{
struct ocfs2_inode_info *ocfs2_inode = OCFS2_I(inode);
--- a/fs/ocfs2/ioctl.h 2008-04-17 10:33:02.000000000 -0400
+++ a/fs/ocfs2/ioctl.h 2008-04-19 15:14:52.000000000 -0400
@@ -10,6 +10,9 @@
#ifndef OCFS2_IOCTL_H
#define OCFS2_IOCTL_H
+int ocfs2_set_inode_attr(struct inode *inode, unsigned flags,
+ unsigned mask);
+
int ocfs2_ioctl(struct inode * inode, struct file * filp,
unsigned int cmd, unsigned long arg);
long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg);
--- a/fs/ocfs2/namei.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/namei.c 2008-04-19 15:14:52.000000000 -0400
@@ -40,6 +40,7 @@
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
+#include <linux/vs_tag.h>
#define MLOG_MASK_PREFIX ML_NAMEI
#include <cluster/masklog.h>
@@ -366,6 +367,9 @@ static int ocfs2_mknod_locked(struct ocf
u64 fe_blkno = 0;
u16 suballoc_bit;
struct inode *inode = NULL;
+ uid_t uid;
+ gid_t gid;
+ tag_t tag;
mlog_entry("(0x%p, 0x%p, %d, %lu, '%.*s')\n", dir, dentry, mode,
(unsigned long)dev, dentry->d_name.len,
@@ -425,13 +429,19 @@ static int ocfs2_mknod_locked(struct ocf
fe->i_blkno = cpu_to_le64(fe_blkno);
fe->i_suballoc_bit = cpu_to_le16(suballoc_bit);
fe->i_suballoc_slot = cpu_to_le16(osb->slot_num);
- fe->i_uid = cpu_to_le32(current->fsuid);
+
+ tag = dx_current_fstag(osb->sb);
+ uid = current->fsuid;
if (dir->i_mode & S_ISGID) {
- fe->i_gid = cpu_to_le32(dir->i_gid);
+ gid = dir->i_gid;
if (S_ISDIR(mode))
mode |= S_ISGID;
} else
- fe->i_gid = cpu_to_le32(current->fsgid);
+ gid = current->fsgid;
+
+ fe->i_uid = cpu_to_le32(TAGINO_UID(DX_TAG(inode), uid, tag));
+ fe->i_gid = cpu_to_le32(TAGINO_GID(DX_TAG(inode), gid, tag));
+ inode->i_tag = tag;
fe->i_mode = cpu_to_le16(mode);
if (S_ISCHR(mode) || S_ISBLK(mode))
fe->id1.dev1.i_rdev = cpu_to_le64(huge_encode_dev(dev));
@@ -1917,5 +1927,6 @@ const struct inode_operations ocfs2_dir_
.rename = ocfs2_rename,
.setattr = ocfs2_setattr,
.getattr = ocfs2_getattr,
+ .sync_flags = ocfs2_sync_flags,
.permission = ocfs2_permission,
};
--- a/fs/ocfs2/ocfs2_fs.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/ocfs2_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -188,8 +188,12 @@
#define OCFS2_NOATIME_FL (0x00000080) /* do not update atime */
#define OCFS2_DIRSYNC_FL (0x00010000) /* dirsync behaviour (directories only) */
+#define OCFS2_BARRIER_FL (0x04000000) /* Barrier for chroot() */
+#define OCFS2_IUNLINK_FL (0x08000000) /* Immutable unlink */
+
#define OCFS2_FL_VISIBLE (0x000100FF) /* User visible flags */
#define OCFS2_FL_MODIFIABLE (0x000100FF) /* User modifiable flags */
+#define OCFS2_FL_MASK (0x0F0100FF)
/*
* Extent record flags (e_node.leaf.flags)
--- a/fs/ocfs2/ocfs2.h 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/ocfs2.h 2008-04-21 10:15:31.000000000 -0400
@@ -171,6 +171,7 @@ enum ocfs2_mount_options
OCFS2_MOUNT_ERRORS_PANIC = 1 << 3, /* Panic on errors */
OCFS2_MOUNT_DATA_WRITEBACK = 1 << 4, /* No data ordering */
OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */
+ OCFS2_MOUNT_TAGGED = 1 << 8, /* use tagging */
};
#define OCFS2_OSB_SOFT_RO 0x0001
--- a/fs/ocfs2/super.c 2008-04-17 12:05:40.000000000 -0400
+++ a/fs/ocfs2/super.c 2008-04-23 08:31:10.000000000 -0400
@@ -154,6 +154,7 @@ enum {
Opt_commit,
Opt_localalloc,
Opt_localflocks,
+ Opt_tag, Opt_notag, Opt_tagid,
Opt_err,
};
@@ -172,6 +173,9 @@ static match_table_t tokens = {
{Opt_commit, "commit=%u"},
{Opt_localalloc, "localalloc=%d"},
{Opt_localflocks, "localflocks"},
+ {Opt_tag, "tag"},
+ {Opt_notag, "notag"},
+ {Opt_tagid, "tagid=%u"},
{Opt_err, NULL}
};
@@ -391,6 +395,13 @@ static int ocfs2_remount(struct super_bl
goto out;
}
+ if ((parsed_options.mount_opt & OCFS2_MOUNT_TAGGED) &&
+ !(sb->s_flags & MS_TAGGED)) {
+ ret = -EINVAL;
+ mlog(ML_ERROR, "Cannot change tagging on remount\n");
+ goto out;
+ }
+
if ((osb->s_mount_opt & OCFS2_MOUNT_HB_LOCAL) !=
(parsed_options.mount_opt & OCFS2_MOUNT_HB_LOCAL)) {
ret = -EINVAL;
@@ -691,6 +702,9 @@ static int ocfs2_fill_super(struct super
ocfs2_complete_mount_recovery(osb);
+ if (osb->s_mount_opt & OCFS2_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED;
+
if (ocfs2_mount_local(osb))
snprintf(nodestr, sizeof(nodestr), "local");
else
@@ -864,6 +878,20 @@ static int ocfs2_parse_options(struct su
if (!is_remount)
mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
break;
+#ifndef CONFIG_TAGGING_NONE
+ case Opt_tag:
+ mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+ break;
+ case Opt_notag:
+ mopt->mount_opt &= ~OCFS2_MOUNT_TAGGED;
+ break;
+#endif
+#ifdef CONFIG_PROPAGATE
+ case Opt_tagid:
+ /* use args[0] */
+ mopt->mount_opt |= OCFS2_MOUNT_TAGGED;
+ break;
+#endif
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
--- a/fs/open.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/open.c 2008-04-21 13:51:52.000000000 -0400
@@ -27,22 +27,31 @@
#include <linux/rcupdate.h>
#include <linux/audit.h>
#include <linux/falloc.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_cowbl.h>
int vfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
int retval = -ENODEV;
if (dentry) {
+ struct super_block *sb = dentry->d_sb;
+
retval = -ENOSYS;
- if (dentry->d_sb->s_op->statfs) {
+ if (sb->s_op->statfs) {
memset(buf, 0, sizeof(*buf));
retval = security_sb_statfs(dentry);
if (retval)
return retval;
- retval = dentry->d_sb->s_op->statfs(dentry, buf);
+ retval = sb->s_op->statfs(dentry, buf);
if (retval == 0 && buf->f_frsize == 0)
buf->f_frsize = buf->f_bsize;
}
+ if (!vx_check(0, VS_ADMIN|VS_WATCH))
+ vx_vsi_statfs(sb, buf);
}
return retval;
}
@@ -249,7 +258,7 @@ static long do_sys_truncate(const char _
goto dput_and_out;
error = -EROFS;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd.path.mnt))
goto dput_and_out;
error = -EPERM;
@@ -458,7 +467,7 @@ asmlinkage long sys_faccessat(int dfd, c
special_file(nd.path.dentry->d_inode->i_mode))
goto out_path_release;
- if(IS_RDONLY(nd.path.dentry->d_inode))
+ if(IS_RDONLY(nd.path.dentry->d_inode) || MNT_IS_RDONLY(nd.path.mnt))
res = -EROFS;
out_path_release:
@@ -568,7 +577,7 @@ asmlinkage long sys_fchmod(unsigned int
audit_inode(NULL, dentry);
err = -EROFS;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(file->f_vfsmnt))
goto out_putf;
err = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -598,11 +607,11 @@ asmlinkage long sys_fchmodat(int dfd, co
error = __user_walk_fd(dfd, filename, LOOKUP_FOLLOW, &nd);
if (error)
goto out;
- inode = nd.path.dentry->d_inode;
-
- error = -EROFS;
- if (IS_RDONLY(inode))
+
+ error = cow_check_and_break(&nd);
+ if (error)
goto dput_and_out;
+ inode = nd.path.dentry->d_inode;
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -627,7 +636,8 @@ asmlinkage long sys_chmod(const char __u
return sys_fchmodat(AT_FDCWD, filename, mode);
}
-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
+static int chown_common(struct dentry *dentry, struct vfsmount *mnt,
+ uid_t user, gid_t group)
{
struct inode * inode;
int error;
@@ -639,7 +649,7 @@ static int chown_common(struct dentry *
goto out;
}
error = -EROFS;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(mnt))
goto out;
error = -EPERM;
if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
@@ -647,11 +657,11 @@ static int chown_common(struct dentry *
newattrs.ia_valid = ATTR_CTIME;
if (user != (uid_t) -1) {
newattrs.ia_valid |= ATTR_UID;
- newattrs.ia_uid = user;
+ newattrs.ia_uid = dx_map_uid(user);
}
if (group != (gid_t) -1) {
newattrs.ia_valid |= ATTR_GID;
- newattrs.ia_gid = group;
+ newattrs.ia_gid = dx_map_gid(group);
}
if (!S_ISDIR(inode->i_mode))
newattrs.ia_valid |=
@@ -671,7 +681,11 @@ asmlinkage long sys_chown(const char __u
error = user_path_walk(filename, &nd);
if (error)
goto out;
- error = chown_common(nd.path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+ error = cow_check_and_break(&nd);
+ if (!error)
+#endif
+ error = chown_common(nd.path.dentry, nd.path.mnt, user, group);
path_put(&nd.path);
out:
return error;
@@ -691,7 +705,11 @@ asmlinkage long sys_fchownat(int dfd, co
error = __user_walk_fd(dfd, filename, follow, &nd);
if (error)
goto out;
- error = chown_common(nd.path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+ error = cow_check_and_break(&nd);
+ if (!error)
+#endif
+ error = chown_common(nd.path.dentry, nd.path.mnt, user, group);
path_put(&nd.path);
out:
return error;
@@ -705,7 +723,11 @@ asmlinkage long sys_lchown(const char __
error = user_path_walk_link(filename, &nd);
if (error)
goto out;
- error = chown_common(nd.path.dentry, user, group);
+#ifdef CONFIG_VSERVER_COWBL
+ error = cow_check_and_break(&nd);
+ if (!error)
+#endif
+ error = chown_common(nd.path.dentry, nd.path.mnt, user, group);
path_put(&nd.path);
out:
return error;
@@ -724,7 +746,7 @@ asmlinkage long sys_fchown(unsigned int
dentry = file->f_path.dentry;
audit_inode(NULL, dentry);
- error = chown_common(dentry, user, group);
+ error = chown_common(dentry, file->f_vfsmnt, user, group);
fput(file);
out:
return error;
@@ -971,6 +993,7 @@ repeat:
else
FD_CLR(fd, fdt->close_on_exec);
files->next_fd = fd + 1;
+ vx_openfd_inc(fd);
#if 1
/* Sanity check */
if (fdt->fd[fd] != NULL) {
@@ -998,6 +1021,7 @@ static void __put_unused_fd(struct files
__FD_CLR(fd, fdt->open_fds);
if (fd < files->next_fd)
files->next_fd = fd;
+ vx_openfd_dec(fd);
}
void put_unused_fd(unsigned int fd)
--- a/fs/proc/array.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/array.c 2008-05-21 14:08:19.000000000 -0400
@@ -79,6 +79,8 @@
#include <linux/delayacct.h>
#include <linux/seq_file.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -140,8 +142,9 @@ static const char *task_state_array[] =
"D (disk sleep)", /* 2 */
"T (stopped)", /* 4 */
"T (tracing stop)", /* 8 */
- "Z (zombie)", /* 16 */
- "X (dead)" /* 32 */
+ "H (on hold)", /* 16 */
+ "Z (zombie)", /* 32 */
+ "X (dead)", /* 64 */
};
static inline const char *get_task_state(struct task_struct *tsk)
@@ -162,6 +165,7 @@ static inline void task_state(struct seq
struct group_info *group_info;
int g;
struct fdtable *fdt = NULL;
+/* + pid_t pid, ptgid, tppid, tgid; */
pid_t ppid, tpid;
rcu_read_lock();
@@ -169,6 +173,12 @@ static inline void task_state(struct seq
task_tgid_nr_ns(rcu_dereference(p->real_parent), ns) : 0;
tpid = pid_alive(p) && p->ptrace ?
task_pid_nr_ns(rcu_dereference(p->parent), ns) : 0;
+/* tgid = vx_map_tgid(p->tgid);
+ pid = vx_map_pid(p->pid);
+ ptgid = vx_map_pid(pid_alive(p) ?
+ rcu_dereference(p->real_parent)->tgid : 0);
+ tppid = vx_map_pid(pid_alive(p) && p->ptrace ?
+ rcu_dereference(p->parent)->pid : 0); */
seq_printf(m,
"State:\t%s\n"
"Tgid:\t%d\n"
@@ -308,6 +318,45 @@ static inline void task_context_switch_c
p->nivcsw);
}
+int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task)
+{
+ seq_printf(m, "Proxy:\t%p(%c)\n"
+ "Count:\t%u\n"
+ "uts:\t%p(%c)\n"
+ "ipc:\t%p(%c)\n"
+ "mnt:\t%p(%c)\n"
+ "pid:\t%p(%c)\n"
+ "user:\t%p(%c)\n"
+ "net:\t%p(%c)\n",
+ task->nsproxy,
+ (task->nsproxy == init_task.nsproxy ? 'I' : '-'),
+ atomic_read(&task->nsproxy->count),
+ task->nsproxy->uts_ns,
+ (task->nsproxy->uts_ns == init_task.nsproxy->uts_ns ? 'I' : '-'),
+ task->nsproxy->ipc_ns,
+ (task->nsproxy->ipc_ns == init_task.nsproxy->ipc_ns ? 'I' : '-'),
+ task->nsproxy->mnt_ns,
+ (task->nsproxy->mnt_ns == init_task.nsproxy->mnt_ns ? 'I' : '-'),
+ task->nsproxy->pid_ns,
+ (task->nsproxy->pid_ns == init_task.nsproxy->pid_ns ? 'I' : '-'),
+ task->nsproxy->user_ns,
+ (task->nsproxy->user_ns == init_task.nsproxy->user_ns ? 'I' : '-'),
+ task->nsproxy->net_ns,
+ (task->nsproxy->net_ns == init_task.nsproxy->net_ns ? 'I' : '-'));
+ return 0;
+}
+
+void task_vs_id(struct seq_file *m, struct task_struct *task)
+{
+ if (task_vx_flags(task, VXF_HIDE_VINFO, 0))
+ return;
+
+ seq_printf(m, "VxID: %d\n", vx_task_xid(task));
+ seq_printf(m, "NxID: %d\n", nx_task_nid(task));
+}
+
+
int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task)
{
@@ -323,6 +372,7 @@ int proc_pid_status(struct seq_file *m,
task_sig(m, task);
task_cap(m, task);
cpuset_task_status_allowed(m, task);
+ task_vs_id(m, task);
#if defined(CONFIG_S390)
task_show_regs(m, task);
#endif
@@ -494,6 +544,17 @@ static int do_task_stat(struct seq_file
/* convert nsec -> ticks */
start_time = nsec_to_clock_t(start_time);
+ /* fixup start time for virt uptime */
+ if (vx_flags(VXF_VIRT_UPTIME, 0)) {
+ unsigned long long bias =
+ current->vx_info->cvirt.bias_clock;
+
+ if (start_time > bias)
+ start_time -= bias;
+ else
+ start_time = 0;
+ }
+
seq_printf(m, "%d (%s) %c %d %d %d %d %d %u %lu \
%lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \
%lu %lu %lu %lu %lu %lu %lu %lu %d %d %u %u %llu %lu %ld\n",
--- a/fs/proc/base.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/base.c 2008-05-21 14:08:19.000000000 -0400
@@ -76,6 +76,8 @@
#include <linux/oom.h>
#include <linux/elf.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#include "internal.h"
/* NOTE:
@@ -1290,6 +1292,8 @@ static struct inode *proc_pid_make_inode
inode->i_uid = task->euid;
inode->i_gid = task->egid;
}
+ /* procfs is xid tagged */
+ inode->i_tag = (tag_t)vx_task_xid(task);
security_task_to_inode(task, inode);
out:
@@ -1833,6 +1837,13 @@ static struct dentry *proc_pident_lookup
if (!task)
goto out_no_task;
+ /* TODO: maybe we can come up with a generic approach? */
+ if (task_vx_flags(task, VXF_HIDE_VINFO, 0) &&
+ (dentry->d_name.len == 5) &&
+ (!memcmp(dentry->d_name.name, "vinfo", 5) ||
+ !memcmp(dentry->d_name.name, "ninfo", 5)))
+ goto out;
+
/*
* Yes, it does not scale. And it should not. Don't add
* new entries into /proc/<tgid>/ without very good reasons.
@@ -2220,7 +2231,7 @@ out_iput:
static struct dentry *proc_base_lookup(struct inode *dir, struct dentry *dentry)
{
struct dentry *error;
- struct task_struct *task = get_proc_task(dir);
+ struct task_struct *task = get_proc_task_real(dir);
const struct pid_entry *p, *last;
error = ERR_PTR(-ENOENT);
@@ -2285,6 +2296,9 @@ static int proc_pid_io_accounting(struct
static const struct file_operations proc_task_operations;
static const struct inode_operations proc_task_inode_operations;
+extern int proc_pid_vx_info(struct task_struct *, char *);
+extern int proc_pid_nx_info(struct task_struct *, char *);
+
static const struct pid_entry tgid_base_stuff[] = {
DIR("task", S_IRUGO|S_IXUGO, task),
DIR("fd", S_IRUSR|S_IXUSR, fd),
@@ -2335,6 +2349,8 @@ static const struct pid_entry tgid_base_
#ifdef CONFIG_CGROUPS
REG("cgroup", S_IRUGO, cgroup),
#endif
+ INF("vinfo", S_IRUGO, pid_vx_info),
+ INF("ninfo", S_IRUGO, pid_nx_info),
INF("oom_score", S_IRUGO, oom_score),
REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
@@ -2350,6 +2366,7 @@ static const struct pid_entry tgid_base_
#ifdef CONFIG_TASK_IO_ACCOUNTING
INF("io", S_IRUGO, pid_io_accounting),
#endif
+ ONE("nsproxy", S_IRUGO, pid_nsproxy),
};
static int proc_tgid_base_readdir(struct file * filp,
@@ -2547,7 +2564,7 @@ retry:
iter.task = NULL;
pid = find_ge_pid(iter.tgid, ns);
if (pid) {
- iter.tgid = pid_nr_ns(pid, ns);
+ iter.tgid = pid_unmapped_nr_ns(pid, ns);
iter.task = pid_task(pid, PIDTYPE_PID);
/* What we to know is if the pid we have find is the
* pid of a thread_group_leader. Testing for task
@@ -2577,7 +2594,7 @@ static int proc_pid_fill_cache(struct fi
struct tgid_iter iter)
{
char name[PROC_NUMBUF];
- int len = snprintf(name, sizeof(name), "%d", iter.tgid);
+ int len = snprintf(name, sizeof(name), "%d", vx_map_tgid(iter.tgid));
return proc_fill_cache(filp, dirent, filldir, name, len,
proc_pid_instantiate, iter.task, NULL);
}
@@ -2586,7 +2603,7 @@ static int proc_pid_fill_cache(struct fi
int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
{
unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY;
- struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode);
+ struct task_struct *reaper = get_proc_task_real(filp->f_path.dentry->d_inode);
struct tgid_iter iter;
struct pid_namespace *ns;
@@ -2606,6 +2623,8 @@ int proc_pid_readdir(struct file * filp,
iter.task;
iter.tgid += 1, iter = next_tgid(ns, iter)) {
filp->f_pos = iter.tgid + TGID_OFFSET;
+ if (!vx_proc_task_visible(iter.task))
+ continue;
if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
put_task_struct(iter.task);
goto out;
@@ -2742,6 +2761,8 @@ static struct dentry *proc_task_lookup(s
tid = name_to_int(dentry);
if (tid == ~0U)
goto out;
+ if (vx_current_initpid(tid))
+ goto out;
ns = dentry->d_sb->s_fs_info;
rcu_read_lock();
--- a/fs/proc/generic.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/generic.c 2008-04-20 14:23:26.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
+#include <linux/vserver/inode.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -389,6 +390,8 @@ struct dentry *proc_lookup_de(struct pro
for (de = de->subdir; de ; de = de->next) {
if (de->namelen != dentry->d_name.len)
continue;
+ if (!vx_hide_check(0, de->vx_flags))
+ continue;
if (!memcmp(dentry->d_name.name, de->name, de->namelen)) {
unsigned int ino;
@@ -397,6 +400,8 @@ struct dentry *proc_lookup_de(struct pro
spin_unlock(&proc_subdir_lock);
error = -EINVAL;
inode = proc_get_inode(dir->i_sb, ino, de);
+ /* generic proc entries belong to the host */
+ inode->i_tag = 0;
goto out_unlock;
}
}
@@ -481,6 +486,8 @@ int proc_readdir_de(struct proc_dir_entr
/* filldir passes info to user space */
de_get(de);
+ if (!vx_hide_check(0, de->vx_flags))
+ goto skip;
spin_unlock(&proc_subdir_lock);
if (filldir(dirent, de->name, de->namelen, filp->f_pos,
de->low_ino, de->mode >> 12) < 0) {
@@ -488,6 +495,7 @@ int proc_readdir_de(struct proc_dir_entr
goto out;
}
spin_lock(&proc_subdir_lock);
+ skip:
filp->f_pos++;
next = de->next;
de_put(de);
@@ -602,6 +610,7 @@ static struct proc_dir_entry *__proc_cre
ent->nlink = nlink;
atomic_set(&ent->count, 1);
ent->pde_users = 0;
+ ent->vx_flags = IATTR_PROC_DEFAULT;
spin_lock_init(&ent->pde_unload_lock);
ent->pde_unload_completion = NULL;
out:
@@ -624,7 +633,8 @@ struct proc_dir_entry *proc_symlink(cons
kfree(ent->data);
kfree(ent);
ent = NULL;
- }
+ } else
+ ent->vx_flags = IATTR_PROC_SYMLINK;
} else {
kfree(ent);
ent = NULL;
--- a/fs/proc/inode.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -408,6 +408,8 @@ struct inode *proc_get_inode(struct supe
inode->i_uid = de->uid;
inode->i_gid = de->gid;
}
+ if (de->vx_flags)
+ PROC_I(inode)->vx_flags = de->vx_flags;
if (de->size)
inode->i_size = de->size;
if (de->nlink)
--- a/fs/proc/internal.h 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/internal.h 2008-05-21 14:08:19.000000000 -0400
@@ -10,6 +10,7 @@
*/
#include <linux/proc_fs.h>
+#include <linux/vs_pid.h>
#ifdef CONFIG_PROC_SYSCTL
extern int proc_sys_init(void);
@@ -57,6 +58,9 @@ extern int proc_pid_status(struct seq_fi
struct pid *pid, struct task_struct *task);
extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
struct pid *pid, struct task_struct *task);
+extern int proc_pid_nsproxy(struct seq_file *m, struct pid_namespace *ns,
+ struct pid *pid, struct task_struct *task);
+
extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
extern const struct file_operations proc_maps_operations;
@@ -76,11 +80,16 @@ static inline struct pid *proc_pid(struc
return PROC_I(inode)->pid;
}
-static inline struct task_struct *get_proc_task(struct inode *inode)
+static inline struct task_struct *get_proc_task_real(struct inode *inode)
{
return get_pid_task(proc_pid(inode), PIDTYPE_PID);
}
+static inline struct task_struct *get_proc_task(struct inode *inode)
+{
+ return vx_get_proc_task(inode, proc_pid(inode));
+}
+
static inline int proc_fd(struct inode *inode)
{
return PROC_I(inode)->fd;
--- a/fs/proc/proc_misc.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/proc_misc.c 2008-04-23 11:56:24.000000000 -0400
@@ -56,6 +56,8 @@
#include <asm/div64.h>
#include "internal.h"
+#include <linux/vs_cvirt.h>
+
#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)
/*
@@ -83,22 +85,38 @@ static int proc_calc_metrics(char *page,
static int loadavg_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
+ unsigned long running;
+ unsigned int threads;
int a, b, c;
int len;
unsigned long seq;
do {
seq = read_seqbegin(&xtime_lock);
- a = avenrun[0] + (FIXED_1/200);
- b = avenrun[1] + (FIXED_1/200);
- c = avenrun[2] + (FIXED_1/200);
+ if (vx_flags(VXF_VIRT_LOAD, 0)) {
+ struct vx_info *vxi = current->vx_info;
+
+ a = vxi->cvirt.load[0] + (FIXED_1/200);
+ b = vxi->cvirt.load[1] + (FIXED_1/200);
+ c = vxi->cvirt.load[2] + (FIXED_1/200);
+
+ running = atomic_read(&vxi->cvirt.nr_running);
+ threads = atomic_read(&vxi->cvirt.nr_threads);
+ } else {
+ a = avenrun[0] + (FIXED_1/200);
+ b = avenrun[1] + (FIXED_1/200);
+ c = avenrun[2] + (FIXED_1/200);
+
+ running = nr_running();
+ threads = nr_threads;
+ }
} while (read_seqretry(&xtime_lock, seq));
len = sprintf(page,"%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
LOAD_INT(a), LOAD_FRAC(a),
LOAD_INT(b), LOAD_FRAC(b),
LOAD_INT(c), LOAD_FRAC(c),
- nr_running(), nr_threads,
+ running, threads,
task_active_pid_ns(current)->last_pid);
return proc_calc_metrics(page, start, off, count, eof, len);
}
@@ -114,6 +132,9 @@ static int uptime_read_proc(char *page,
do_posix_clock_monotonic_gettime(&uptime);
monotonic_to_bootbased(&uptime);
cputime_to_timespec(idletime, &idle);
+ if (vx_flags(VXF_VIRT_UPTIME, 0))
+ vx_vsi_uptime(&uptime, &idle);
+
len = sprintf(page,"%lu.%02lu %lu.%02lu\n",
(unsigned long) uptime.tv_sec,
(uptime.tv_nsec / (NSEC_PER_SEC / 100)),
@@ -145,7 +166,7 @@ static int meminfo_read_proc(char *page,
cached = global_page_state(NR_FILE_PAGES) -
total_swapcache_pages - i.bufferram;
- if (cached < 0)
+ if (cached < 0 || vx_flags(VXF_VIRT_MEM, 0))
cached = 0;
get_vmalloc_info(&vmi);
--- a/fs/proc/root.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/proc/root.c 2008-04-19 15:14:52.000000000 -0400
@@ -23,6 +23,9 @@
#include "internal.h"
struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver;
+struct proc_dir_entry *proc_virtual;
+
+extern void proc_vx_init(void);
static int proc_test_super(struct super_block *sb, void *data)
{
@@ -138,6 +141,7 @@ void __init proc_root_init(void)
proc_device_tree_init();
#endif
proc_bus = proc_mkdir("bus", NULL);
+ proc_vx_init();
proc_sys_init();
}
--- a/fs/quota.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/quota.c 2008-04-21 09:21:04.000000000 -0400
@@ -18,6 +18,7 @@
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
+#include <linux/vs_context.h>
/* Check validity of generic quotactl commands */
static int generic_quotactl_valid(struct super_block *sb, int type, int cmd, qid_t id)
@@ -82,11 +83,11 @@ static int generic_quotactl_valid(struct
if (cmd == Q_GETQUOTA) {
if (((type == USRQUOTA && current->euid != id) ||
(type == GRPQUOTA && !in_egroup_p(id))) &&
- !capable(CAP_SYS_ADMIN))
+ !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return -EPERM;
}
else if (cmd != Q_GETFMT && cmd != Q_SYNC && cmd != Q_GETINFO)
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return -EPERM;
return 0;
@@ -133,10 +134,10 @@ static int xqm_quotactl_valid(struct sup
if (cmd == Q_XGETQUOTA) {
if (((type == XQM_USRQUOTA && current->euid != id) ||
(type == XQM_GRPQUOTA && !in_egroup_p(id))) &&
- !capable(CAP_SYS_ADMIN))
+ !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return -EPERM;
} else if (cmd != Q_XGETQSTAT && cmd != Q_XQUOTASYNC) {
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return -EPERM;
}
@@ -329,6 +330,46 @@ static int do_quotactl(struct super_bloc
return 0;
}
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+
+#include <linux/vroot.h>
+#include <linux/major.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/vserver/debug.h>
+
+static vroot_grb_func *vroot_get_real_bdev = NULL;
+
+static DEFINE_SPINLOCK(vroot_grb_lock);
+
+int register_vroot_grb(vroot_grb_func *func) {
+ int ret = -EBUSY;
+
+ spin_lock(&vroot_grb_lock);
+ if (!vroot_get_real_bdev) {
+ vroot_get_real_bdev = func;
+ ret = 0;
+ }
+ spin_unlock(&vroot_grb_lock);
+ return ret;
+}
+EXPORT_SYMBOL(register_vroot_grb);
+
+int unregister_vroot_grb(vroot_grb_func *func) {
+ int ret = -EINVAL;
+
+ spin_lock(&vroot_grb_lock);
+ if (vroot_get_real_bdev == func) {
+ vroot_get_real_bdev = NULL;
+ ret = 0;
+ }
+ spin_unlock(&vroot_grb_lock);
+ return ret;
+}
+EXPORT_SYMBOL(unregister_vroot_grb);
+
+#endif
+
/*
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
@@ -346,6 +387,22 @@ static inline struct super_block *quotac
putname(tmp);
if (IS_ERR(bdev))
return ERR_CAST(bdev);
+#if defined(CONFIG_BLK_DEV_VROOT) || defined(CONFIG_BLK_DEV_VROOT_MODULE)
+ if (bdev && bdev->bd_inode &&
+ imajor(bdev->bd_inode) == VROOT_MAJOR) {
+ struct block_device *bdnew = (void *)-EINVAL;
+
+ if (vroot_get_real_bdev)
+ bdnew = vroot_get_real_bdev(bdev);
+ else
+ vxdprintk(VXD_CBIT(misc, 0),
+ "vroot_get_real_bdev not set");
+ bdput(bdev);
+ if (IS_ERR(bdnew))
+ return ERR_PTR(PTR_ERR(bdnew));
+ bdev = bdnew;
+ }
+#endif
sb = get_super(bdev);
bdput(bdev);
if (!sb)
--- a/fs/reiserfs/bitmap.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/reiserfs/bitmap.c 2008-04-19 15:14:52.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/reiserfs_fs_sb.h>
#include <linux/reiserfs_fs_i.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
#define PREALLOCATION_SIZE 9
@@ -429,8 +430,10 @@ static void _reiserfs_free_block(struct
set_sb_free_blocks(rs, sb_free_blocks(rs) + 1);
journal_mark_dirty(th, s, sbh);
- if (for_unformatted)
+ if (for_unformatted) {
+ DLIMIT_FREE_BLOCK(inode, 1);
DQUOT_FREE_BLOCK_NODIRTY(inode, 1);
+ }
}
void reiserfs_free_block(struct reiserfs_transaction_handle *th,
@@ -1045,6 +1048,7 @@ static inline int blocknrs_and_prealloc_
b_blocknr_t finish = SB_BLOCK_COUNT(s) - 1;
int passno = 0;
int nr_allocated = 0;
+ int blocks;
determine_prealloc_size(hint);
if (!hint->formatted_node) {
@@ -1054,19 +1058,30 @@ static inline int blocknrs_and_prealloc_
"reiserquota: allocating %d blocks id=%u",
amount_needed, hint->inode->i_uid);
#endif
- quota_ret =
- DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode, amount_needed);
- if (quota_ret) /* Quota exceeded? */
+ quota_ret = DQUOT_ALLOC_BLOCK_NODIRTY(hint->inode,
+ amount_needed);
+ if (quota_ret)
return QUOTA_EXCEEDED;
+ if (DLIMIT_ALLOC_BLOCK(hint->inode, amount_needed)) {
+ DQUOT_FREE_BLOCK_NODIRTY(hint->inode,
+ amount_needed);
+ return NO_DISK_SPACE;
+ }
+
if (hint->preallocate && hint->prealloc_size) {
#ifdef REISERQUOTA_DEBUG
reiserfs_debug(s, REISERFS_DEBUG_CODE,
"reiserquota: allocating (prealloc) %d blocks id=%u",
hint->prealloc_size, hint->inode->i_uid);
#endif
- quota_ret =
- DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
- hint->prealloc_size);
+ quota_ret = DQUOT_PREALLOC_BLOCK_NODIRTY(hint->inode,
+ hint->prealloc_size);
+ if (!quota_ret &&
+ DLIMIT_ALLOC_BLOCK(hint->inode, hint->prealloc_size)) {
+ DQUOT_FREE_BLOCK_NODIRTY(hint->inode,
+ hint->prealloc_size);
+ quota_ret = 1;
+ }
if (quota_ret)
hint->preallocate = hint->prealloc_size = 0;
}
@@ -1098,7 +1113,10 @@ static inline int blocknrs_and_prealloc_
nr_allocated,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed + hint->prealloc_size - nr_allocated); /* Free not allocated blocks */
+ /* Free not allocated blocks */
+ blocks = amount_needed + hint->prealloc_size - nr_allocated;
+ DLIMIT_FREE_BLOCK(hint->inode, blocks);
+ DQUOT_FREE_BLOCK_NODIRTY(hint->inode, blocks);
}
while (nr_allocated--)
reiserfs_free_block(hint->th, hint->inode,
@@ -1129,10 +1147,10 @@ static inline int blocknrs_and_prealloc_
REISERFS_I(hint->inode)->i_prealloc_count,
hint->inode->i_uid);
#endif
- DQUOT_FREE_BLOCK_NODIRTY(hint->inode, amount_needed +
- hint->prealloc_size - nr_allocated -
- REISERFS_I(hint->inode)->
- i_prealloc_count);
+ blocks = amount_needed + hint->prealloc_size - nr_allocated -
+ REISERFS_I(hint->inode)->i_prealloc_count;
+ DLIMIT_FREE_BLOCK(hint->inode, blocks);
+ DQUOT_FREE_BLOCK_NODIRTY(hint->inode, blocks);
}
return CARRY_ON;
--- a/fs/reiserfs/file.c 2008-04-17 11:31:38.000000000 -0400
+++ a/fs/reiserfs/file.c 2008-04-19 15:14:52.000000000 -0400
@@ -306,4 +306,5 @@ const struct inode_operations reiserfs_f
.listxattr = reiserfs_listxattr,
.removexattr = reiserfs_removexattr,
.permission = reiserfs_permission,
+ .sync_flags = reiserfs_sync_flags,
};
--- a/fs/reiserfs/inode.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/reiserfs/inode.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,8 @@
#include <linux/writeback.h>
#include <linux/quotaops.h>
#include <linux/swap.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vs_tag.h>
int reiserfs_commit_write(struct file *f, struct page *page,
unsigned from, unsigned to);
@@ -52,6 +54,7 @@ void reiserfs_delete_inode(struct inode
* stat data deletion */
if (!err)
DQUOT_FREE_INODE(inode);
+ DLIMIT_FREE_INODE(inode);
if (journal_end(&th, inode->i_sb, jbegin_count))
goto out;
@@ -1114,6 +1117,8 @@ static void init_inode(struct inode *ino
struct buffer_head *bh;
struct item_head *ih;
__u32 rdev;
+ uid_t uid;
+ gid_t gid;
//int version = ITEM_VERSION_1;
bh = PATH_PLAST_BUFFER(path);
@@ -1137,12 +1142,13 @@ static void init_inode(struct inode *ino
(struct stat_data_v1 *)B_I_PITEM(bh, ih);
unsigned long blocks;
+ uid = sd_v1_uid(sd);
+ gid = sd_v1_gid(sd);
+
set_inode_item_key_version(inode, KEY_FORMAT_3_5);
set_inode_sd_version(inode, STAT_DATA_V1);
inode->i_mode = sd_v1_mode(sd);
inode->i_nlink = sd_v1_nlink(sd);
- inode->i_uid = sd_v1_uid(sd);
- inode->i_gid = sd_v1_gid(sd);
inode->i_size = sd_v1_size(sd);
inode->i_atime.tv_sec = sd_v1_atime(sd);
inode->i_mtime.tv_sec = sd_v1_mtime(sd);
@@ -1184,11 +1190,12 @@ static void init_inode(struct inode *ino
// (directories and symlinks)
struct stat_data *sd = (struct stat_data *)B_I_PITEM(bh, ih);
+ uid = sd_v2_uid(sd);
+ gid = sd_v2_gid(sd);
+
inode->i_mode = sd_v2_mode(sd);
inode->i_nlink = sd_v2_nlink(sd);
- inode->i_uid = sd_v2_uid(sd);
inode->i_size = sd_v2_size(sd);
- inode->i_gid = sd_v2_gid(sd);
inode->i_mtime.tv_sec = sd_v2_mtime(sd);
inode->i_atime.tv_sec = sd_v2_atime(sd);
inode->i_ctime.tv_sec = sd_v2_ctime(sd);
@@ -1218,6 +1225,10 @@ static void init_inode(struct inode *ino
sd_attrs_to_i_attrs(sd_v2_attrs(sd), inode);
}
+ inode->i_uid = INOTAG_UID(DX_TAG(inode), uid, gid);
+ inode->i_gid = INOTAG_GID(DX_TAG(inode), uid, gid);
+ inode->i_tag = INOTAG_TAG(DX_TAG(inode), uid, gid, 0);
+
pathrelse(path);
if (S_ISREG(inode->i_mode)) {
inode->i_op = &reiserfs_file_inode_operations;
@@ -1240,13 +1251,15 @@ static void init_inode(struct inode *ino
static void inode2sd(void *sd, struct inode *inode, loff_t size)
{
struct stat_data *sd_v2 = (struct stat_data *)sd;
+ uid_t uid = TAGINO_UID(DX_TAG(inode), inode->i_uid, inode->i_tag);
+ gid_t gid = TAGINO_GID(DX_TAG(inode), inode->i_gid, inode->i_tag);
__u16 flags;
+ set_sd_v2_uid(sd_v2, uid);
+ set_sd_v2_gid(sd_v2, gid);
set_sd_v2_mode(sd_v2, inode->i_mode);
set_sd_v2_nlink(sd_v2, inode->i_nlink);
- set_sd_v2_uid(sd_v2, inode->i_uid);
set_sd_v2_size(sd_v2, size);
- set_sd_v2_gid(sd_v2, inode->i_gid);
set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
set_sd_v2_ctime(sd_v2, inode->i_ctime.tv_sec);
@@ -1769,6 +1782,10 @@ int reiserfs_new_inode(struct reiserfs_t
BUG_ON(!th->t_trans_id);
+ if (DLIMIT_ALLOC_INODE(inode)) {
+ err = -ENOSPC;
+ goto out_bad_dlimit;
+ }
if (DQUOT_ALLOC_INODE(inode)) {
err = -EDQUOT;
goto out_end_trans;
@@ -1954,6 +1971,9 @@ int reiserfs_new_inode(struct reiserfs_t
DQUOT_FREE_INODE(inode);
out_end_trans:
+ DLIMIT_FREE_INODE(inode);
+
+ out_bad_dlimit:
journal_end(th, th->t_super, th->t_blocks_allocated);
/* Drop can be outside and it needs more credits so it's better to have it outside */
DQUOT_DROP(inode);
@@ -2848,6 +2868,14 @@ void sd_attrs_to_i_attrs(__u16 sd_attrs,
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
+ if (sd_attrs & REISERFS_IUNLINK_FL)
+ inode->i_flags |= S_IUNLINK;
+ else
+ inode->i_flags &= ~S_IUNLINK;
+ if (sd_attrs & REISERFS_BARRIER_FL)
+ inode->i_flags |= S_BARRIER;
+ else
+ inode->i_flags &= ~S_BARRIER;
if (sd_attrs & REISERFS_APPEND_FL)
inode->i_flags |= S_APPEND;
else
@@ -2870,6 +2898,14 @@ void i_attrs_to_sd_attrs(struct inode *i
*sd_attrs |= REISERFS_IMMUTABLE_FL;
else
*sd_attrs &= ~REISERFS_IMMUTABLE_FL;
+ if (inode->i_flags & S_IUNLINK)
+ *sd_attrs |= REISERFS_IUNLINK_FL;
+ else
+ *sd_attrs &= ~REISERFS_IUNLINK_FL;
+ if (inode->i_flags & S_BARRIER)
+ *sd_attrs |= REISERFS_BARRIER_FL;
+ else
+ *sd_attrs &= ~REISERFS_BARRIER_FL;
if (inode->i_flags & S_SYNC)
*sd_attrs |= REISERFS_SYNC_FL;
else
@@ -3049,6 +3085,22 @@ static ssize_t reiserfs_direct_IO(int rw
reiserfs_get_blocks_direct_io, NULL);
}
+int reiserfs_sync_flags(struct inode *inode)
+{
+ u16 oldflags, newflags;
+
+ oldflags = REISERFS_I(inode)->i_attrs;
+ newflags = oldflags;
+ i_attrs_to_sd_attrs(inode, &newflags);
+
+ if (oldflags ^ newflags) {
+ REISERFS_I(inode)->i_attrs = newflags;
+ inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(inode);
+ }
+ return 0;
+}
+
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
@@ -3102,9 +3154,11 @@ int reiserfs_setattr(struct dentry *dent
}
error = inode_change_ok(inode, attr);
+
if (!error) {
if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
- (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid)) {
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid) ||
+ (ia_valid & ATTR_TAG && attr->ia_tag != inode->i_tag)) {
error = reiserfs_chown_xattrs(inode, attr);
if (!error) {
@@ -3134,6 +3188,9 @@ int reiserfs_setattr(struct dentry *dent
inode->i_uid = attr->ia_uid;
if (attr->ia_valid & ATTR_GID)
inode->i_gid = attr->ia_gid;
+ if ((attr->ia_valid & ATTR_TAG) &&
+ IS_TAGGED(inode))
+ inode->i_tag = attr->ia_tag;
mark_inode_dirty(inode);
error =
journal_end(&th, inode->i_sb, jbegin_count);
--- a/fs/reiserfs/ioctl.c 2008-05-21 14:30:05.000000000 -0400
+++ a/fs/reiserfs/ioctl.c 2008-05-21 14:30:41.000000000 -0400
@@ -6,6 +6,7 @@
#include <linux/fs.h>
#include <linux/reiserfs_fs.h>
#include <linux/time.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
@@ -22,7 +23,7 @@
int reiserfs_ioctl(struct inode *inode, struct file *filp, unsigned int cmd,
unsigned long arg)
{
- unsigned int flags;
+ unsigned int flags, oldflags;
switch (cmd) {
case REISERFS_IOC_UNPACK:
@@ -41,12 +42,14 @@ int reiserfs_ioctl(struct inode *inode,
flags = REISERFS_I(inode)->i_attrs;
i_attrs_to_sd_attrs(inode, (__u16 *) & flags);
+ flags &= REISERFS_FL_USER_VISIBLE;
return put_user(flags, (int __user *)arg);
case REISERFS_IOC_SETFLAGS:{
if (!reiserfs_attrs(inode->i_sb))
return -ENOTTY;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (!is_owner_or_cap(inode))
@@ -72,6 +75,10 @@ int reiserfs_ioctl(struct inode *inode,
if (result)
return result;
}
+
+ oldflags = REISERFS_I(inode)->i_attrs;
+ flags = flags & REISERFS_FL_USER_MODIFIABLE;
+ flags |= oldflags & ~REISERFS_FL_USER_MODIFIABLE;
sd_attrs_to_i_attrs(flags, inode);
REISERFS_I(inode)->i_attrs = flags;
inode->i_ctime = CURRENT_TIME_SEC;
@@ -83,7 +90,8 @@ int reiserfs_ioctl(struct inode *inode,
case REISERFS_IOC_SETVERSION:
if (!is_owner_or_cap(inode))
return -EPERM;
- if (IS_RDONLY(inode))
+ if (IS_RDONLY(inode) ||
+ (filp && MNT_IS_RDONLY(filp->f_vfsmnt)))
return -EROFS;
if (get_user(inode->i_generation, (int __user *)arg))
return -EFAULT;
--- a/fs/reiserfs/namei.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/reiserfs/namei.c 2008-04-19 15:14:52.000000000 -0400
@@ -17,6 +17,7 @@
#include <linux/reiserfs_acl.h>
#include <linux/reiserfs_xattr.h>
#include <linux/quotaops.h>
+#include <linux/vs_tag.h>
#define INC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) { inc_nlink(i); if (i->i_nlink >= REISERFS_LINK_MAX) i->i_nlink=1; }
#define DEC_DIR_INODE_NLINK(i) if (i->i_nlink != 1) drop_nlink(i);
@@ -360,6 +361,7 @@ static struct dentry *reiserfs_lookup(st
reiserfs_write_unlock(dir->i_sb);
return ERR_PTR(-EACCES);
}
+ dx_propagate_tag(nd, inode);
/* Propogate the priv_object flag so we know we're in the priv tree */
if (is_reiserfs_priv_object(dir))
@@ -595,6 +597,7 @@ static int new_inode_init(struct inode *
} else {
inode->i_gid = current->fsgid;
}
+ inode->i_tag = dx_current_fstag(inode->i_sb);
DQUOT_INIT(inode);
return 0;
}
@@ -1541,6 +1544,7 @@ const struct inode_operations reiserfs_d
.listxattr = reiserfs_listxattr,
.removexattr = reiserfs_removexattr,
.permission = reiserfs_permission,
+ .sync_flags = reiserfs_sync_flags,
};
/*
@@ -1557,6 +1561,7 @@ const struct inode_operations reiserfs_s
.listxattr = reiserfs_listxattr,
.removexattr = reiserfs_removexattr,
.permission = reiserfs_permission,
+ .sync_flags = reiserfs_sync_flags,
};
@@ -1570,5 +1575,6 @@ const struct inode_operations reiserfs_s
.listxattr = reiserfs_listxattr,
.removexattr = reiserfs_removexattr,
.permission = reiserfs_permission,
+ .sync_flags = reiserfs_sync_flags,
};
--- a/fs/reiserfs/stree.c 2008-04-17 11:31:38.000000000 -0400
+++ a/fs/reiserfs/stree.c 2008-04-19 15:14:52.000000000 -0400
@@ -55,6 +55,7 @@
#include <linux/reiserfs_fs.h>
#include <linux/buffer_head.h>
#include <linux/quotaops.h>
+#include <linux/vs_dlimit.h>
/* Does the buffer contain a disk block which is in the tree. */
inline int B_IS_IN_TREE(const struct buffer_head *p_s_bh)
@@ -1297,6 +1298,7 @@ int reiserfs_delete_item(struct reiserfs
"reiserquota delete_item(): freeing %u, id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
#endif
+ DLIMIT_FREE_SPACE(p_s_inode, quota_cut_bytes);
DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
/* Return deleted body length */
@@ -1385,6 +1387,7 @@ void reiserfs_delete_solid_item(struct r
#endif
DQUOT_FREE_SPACE_NODIRTY(inode,
quota_cut_bytes);
+ DLIMIT_FREE_SPACE(inode, quota_cut_bytes);
}
break;
}
@@ -1735,6 +1738,7 @@ int reiserfs_cut_from_item(struct reiser
"reiserquota cut_from_item(): freeing %u id=%u type=%c",
quota_cut_bytes, p_s_inode->i_uid, '?');
#endif
+ DLIMIT_FREE_SPACE(p_s_inode, quota_cut_bytes);
DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
return n_ret_value;
}
@@ -1976,6 +1980,11 @@ int reiserfs_paste_into_item(struct reis
pathrelse(p_s_search_path);
return -EDQUOT;
}
+ if (DLIMIT_ALLOC_SPACE(inode, n_pasted_size)) {
+ DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+ pathrelse(p_s_search_path);
+ return -ENOSPC;
+ }
init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path,
n_pasted_size);
#ifdef DISPLACE_NEW_PACKING_LOCALITIES
@@ -2028,6 +2037,7 @@ int reiserfs_paste_into_item(struct reis
n_pasted_size, inode->i_uid,
key2type(&(p_s_key->on_disk_key)));
#endif
+ DLIMIT_FREE_SPACE(inode, n_pasted_size);
DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
return retval;
}
@@ -2065,6 +2075,11 @@ int reiserfs_insert_item(struct reiserfs
pathrelse(p_s_path);
return -EDQUOT;
}
+ if (DLIMIT_ALLOC_SPACE(inode, quota_bytes)) {
+ DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+ pathrelse(p_s_path);
+ return -ENOSPC;
+ }
}
init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path,
IH_SIZE + ih_item_len(p_s_ih));
@@ -2112,7 +2127,9 @@ int reiserfs_insert_item(struct reiserfs
"reiserquota insert_item(): freeing %u id=%u type=%c",
quota_bytes, inode->i_uid, head2type(p_s_ih));
#endif
- if (inode)
+ if (inode) {
+ DLIMIT_FREE_SPACE(inode, quota_bytes);
DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+ }
return retval;
}
--- a/fs/reiserfs/super.c 2008-05-21 14:30:05.000000000 -0400
+++ a/fs/reiserfs/super.c 2008-05-21 14:30:41.000000000 -0400
@@ -896,6 +896,14 @@ static int reiserfs_parse_options(struct
{"user_xattr",.setmask = 1 << REISERFS_UNSUPPORTED_OPT},
{"nouser_xattr",.clrmask = 1 << REISERFS_UNSUPPORTED_OPT},
#endif
+#ifndef CONFIG_TAGGING_NONE
+ {"tagxid",.setmask = 1 << REISERFS_TAGGED},
+ {"tag",.setmask = 1 << REISERFS_TAGGED},
+ {"notag",.clrmask = 1 << REISERFS_TAGGED},
+#endif
+#ifdef CONFIG_PROPAGATE
+ {"tag",.arg_required = 'T',.values = NULL},
+#endif
#ifdef CONFIG_REISERFS_FS_POSIX_ACL
{"acl",.setmask = 1 << REISERFS_POSIXACL},
{"noacl",.clrmask = 1 << REISERFS_POSIXACL},
@@ -1159,6 +1167,12 @@ static int reiserfs_remount(struct super
goto out_err;
}
+ if ((mount_options & (1 << REISERFS_TAGGED)) &&
+ !(s->s_flags & MS_TAGGED)) {
+ reiserfs_warning(s, "reiserfs: tagging not permitted on remount.");
+ return -EINVAL;
+ }
+
handle_attrs(s);
/* Add options that are safe here */
@@ -1618,6 +1632,10 @@ static int reiserfs_fill_super(struct su
goto error;
}
+ /* map mount option tagxid */
+ if (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_TAGGED))
+ s->s_flags |= MS_TAGGED;
+
rs = SB_DISK_SUPER_BLOCK(s);
/* Let's do basic sanity check to verify that underlying device is not
smaller than the filesystem. If the check fails then abort and scream,
--- a/fs/reiserfs/xattr.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/reiserfs/xattr.c 2008-04-19 15:14:52.000000000 -0400
@@ -35,6 +35,7 @@
#include <linux/namei.h>
#include <linux/errno.h>
#include <linux/fs.h>
+#include <linux/mount.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/xattr.h>
@@ -747,7 +748,7 @@ int reiserfs_delete_xattrs(struct inode
if (dir->d_inode->i_nlink <= 2) {
root = get_xa_root(inode->i_sb, XATTR_REPLACE);
reiserfs_write_lock_xattrs(inode->i_sb);
- err = vfs_rmdir(root->d_inode, dir);
+ err = vfs_rmdir(root->d_inode, dir, NULL);
reiserfs_write_unlock_xattrs(inode->i_sb);
dput(root);
} else {
--- a/fs/stat.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/stat.c 2008-04-19 15:14:52.000000000 -0400
@@ -26,6 +26,7 @@ void generic_fillattr(struct inode *inod
stat->nlink = inode->i_nlink;
stat->uid = inode->i_uid;
stat->gid = inode->i_gid;
+ stat->tag = inode->i_tag;
stat->rdev = inode->i_rdev;
stat->atime = inode->i_atime;
stat->mtime = inode->i_mtime;
--- a/fs/super.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/super.c 2008-04-19 15:14:52.000000000 -0400
@@ -37,6 +37,9 @@
#include <linux/idr.h>
#include <linux/kobject.h>
#include <linux/mutex.h>
+#include <linux/devpts_fs.h>
+#include <linux/proc_fs.h>
+#include <linux/vs_context.h>
#include <asm/uaccess.h>
@@ -859,12 +862,18 @@ struct vfsmount *
vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void *data)
{
struct vfsmount *mnt;
+ struct super_block *sb;
char *secdata = NULL;
int error;
if (!type)
return ERR_PTR(-ENODEV);
+ error = -EPERM;
+ if ((type->fs_flags & FS_BINARY_MOUNTDATA) &&
+ !vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT))
+ goto out;
+
error = -ENOMEM;
mnt = alloc_vfsmnt(name);
if (!mnt)
@@ -885,7 +894,14 @@ vfs_kern_mount(struct file_system_type *
goto out_free_secdata;
BUG_ON(!mnt->mnt_sb);
- error = security_sb_kern_mount(mnt->mnt_sb, secdata);
+ sb = mnt->mnt_sb;
+ error = -EPERM;
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_BINARY_MOUNT) && !sb->s_bdev &&
+ (sb->s_magic != PROC_SUPER_MAGIC) &&
+ (sb->s_magic != DEVPTS_SUPER_MAGIC))
+ goto out_sb;
+
+ error = security_sb_kern_mount(sb, secdata);
if (error)
goto out_sb;
--- a/fs/sysfs/mount.c 2008-04-17 11:31:38.000000000 -0400
+++ a/fs/sysfs/mount.c 2008-04-19 15:14:52.000000000 -0400
@@ -19,8 +19,6 @@
#include "sysfs.h"
-/* Random magic number */
-#define SYSFS_MAGIC 0x62656572
static struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
@@ -46,7 +44,7 @@ static int sysfs_fill_super(struct super
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = SYSFS_MAGIC;
+ sb->s_magic = SYSFS_SUPER_MAGIC;
sb->s_op = &sysfs_ops;
sb->s_time_gran = 1;
sysfs_sb = sb;
--- a/fs/utimes.c 2008-05-21 14:30:05.000000000 -0400
+++ a/fs/utimes.c 2008-05-21 14:30:41.000000000 -0400
@@ -7,6 +7,8 @@
#include <linux/stat.h>
#include <linux/utime.h>
#include <linux/syscalls.h>
+#include <linux/mount.h>
+#include <linux/vs_cowbl.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -89,6 +91,9 @@ long do_utimes(int dfd, char __user *fil
if (error)
goto out;
+ error = cow_check_and_break(&nd);
+ if (error)
+ goto dput_and_out;
dentry = nd.path.dentry;
}
--- a/fs/xattr.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xattr.c 2008-04-21 13:52:50.000000000 -0400
@@ -17,6 +17,7 @@
#include <linux/module.h>
#include <linux/fsnotify.h>
#include <linux/audit.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
@@ -220,7 +221,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
*/
static long
setxattr(struct dentry *d, char __user *name, void __user *value,
- size_t size, int flags)
+ size_t size, int flags, struct vfsmount *mnt)
{
int error;
void *kvalue = NULL;
@@ -247,6 +248,9 @@ setxattr(struct dentry *d, char __user *
}
}
+ if (MNT_IS_RDONLY(mnt))
+ return -EROFS;
+
error = vfs_setxattr(d, kname, kvalue, size, flags);
kfree(kvalue);
return error;
@@ -262,7 +266,7 @@ sys_setxattr(char __user *path, char __u
error = user_path_walk(path, &nd);
if (error)
return error;
- error = setxattr(nd.path.dentry, name, value, size, flags);
+ error = setxattr(nd.path.dentry, name, value, size, flags, nd.path.mnt);
path_put(&nd.path);
return error;
}
@@ -277,7 +281,7 @@ sys_lsetxattr(char __user *path, char __
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = setxattr(nd.path.dentry, name, value, size, flags);
+ error = setxattr(nd.path.dentry, name, value, size, flags, nd.path.mnt);
path_put(&nd.path);
return error;
}
@@ -295,7 +299,7 @@ sys_fsetxattr(int fd, char __user *name,
return error;
dentry = f->f_path.dentry;
audit_inode(NULL, dentry);
- error = setxattr(dentry, name, value, size, flags);
+ error = setxattr(dentry, name, value, size, flags, f->f_vfsmnt);
fput(f);
return error;
}
@@ -459,7 +463,7 @@ sys_flistxattr(int fd, char __user *list
* Extended attribute REMOVE operations
*/
static long
-removexattr(struct dentry *d, char __user *name)
+removexattr(struct dentry *d, char __user *name, struct vfsmount *mnt)
{
int error;
char kname[XATTR_NAME_MAX + 1];
@@ -470,6 +474,9 @@ removexattr(struct dentry *d, char __use
if (error < 0)
return error;
+ if (MNT_IS_RDONLY(mnt))
+ return -EROFS;
+
return vfs_removexattr(d, kname);
}
@@ -482,7 +489,7 @@ sys_removexattr(char __user *path, char
error = user_path_walk(path, &nd);
if (error)
return error;
- error = removexattr(nd.path.dentry, name);
+ error = removexattr(nd.path.dentry, name, nd.path.mnt);
path_put(&nd.path);
return error;
}
@@ -496,7 +503,7 @@ sys_lremovexattr(char __user *path, char
error = user_path_walk_link(path, &nd);
if (error)
return error;
- error = removexattr(nd.path.dentry, name);
+ error = removexattr(nd.path.dentry, name, nd.path.mnt);
path_put(&nd.path);
return error;
}
@@ -513,7 +520,7 @@ sys_fremovexattr(int fd, char __user *na
return error;
dentry = f->f_path.dentry;
audit_inode(NULL, dentry);
- error = removexattr(dentry, name);
+ error = removexattr(dentry, name, f->f_vfsmnt);
fput(f);
return error;
}
--- a/fs/xfs/linux-2.6/xfs_ioctl.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_ioctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -1125,7 +1125,8 @@ xfs_merge_ioc_xflags(
STATIC unsigned int
xfs_di2lxflags(
- __uint16_t di_flags)
+ __uint16_t di_flags,
+ __uint16_t di_vflags)
{
unsigned int flags = 0;
@@ -1139,6 +1140,11 @@ xfs_di2lxflags(
flags |= FS_NOATIME_FL;
if (di_flags & XFS_DIFLAG_NODUMP)
flags |= FS_NODUMP_FL;
+
+ if (di_vflags & XFS_DIVFLAG_IUNLINK)
+ flags |= FS_IUNLINK_FL;
+ if (di_vflags & XFS_DIVFLAG_BARRIER)
+ flags |= FS_BARRIER_FL;
return flags;
}
@@ -1219,7 +1225,7 @@ xfs_ioc_xattr(
}
case XFS_IOC_GETXFLAGS: {
- flags = xfs_di2lxflags(ip->i_d.di_flags);
+ flags = xfs_di2lxflags(ip->i_d.di_flags, ip->i_d.di_vflags);
if (copy_to_user(arg, &flags, sizeof(flags)))
error = -EFAULT;
break;
--- a/fs/xfs/linux-2.6/xfs_iops.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_iops.c 2008-04-19 17:01:15.000000000 -0400
@@ -53,6 +53,7 @@
#include <linux/namei.h>
#include <linux/security.h>
#include <linux/falloc.h>
+#include <linux/vs_tag.h>
/*
* Bring the atime in the XFS inode uptodate.
@@ -391,6 +392,7 @@ xfs_vn_lookup(
d_add(dentry, NULL);
return NULL;
}
+ dx_propagate_tag(nd, vn_to_inode(cvp));
return d_splice_alias(vn_to_inode(cvp), dentry);
}
@@ -655,6 +657,10 @@ xfs_vn_setattr(
int flags = 0;
int error;
+ error = inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
if (ia_valid & ATTR_UID) {
vattr.va_mask |= XFS_AT_UID;
vattr.va_uid = attr->ia_uid;
@@ -663,6 +669,10 @@ xfs_vn_setattr(
vattr.va_mask |= XFS_AT_GID;
vattr.va_gid = attr->ia_gid;
}
+ if ((ia_valid & ATTR_TAG) && IS_TAGGED(inode)) {
+ vattr.va_mask |= XFS_AT_TAG;
+ vattr.va_tag = attr->ia_tag;
+ }
if (ia_valid & ATTR_SIZE) {
vattr.va_mask |= XFS_AT_SIZE;
vattr.va_size = attr->ia_size;
@@ -708,6 +718,42 @@ xfs_vn_truncate(
}
STATIC int
+xfs_vn_sync_flags(struct inode *inode)
+{
+ unsigned int oldflags, newflags;
+ int flags = 0;
+ int error;
+ bhv_vattr_t vattr;
+ bhv_vnode_t *vp = vn_from_inode(inode);
+
+ memset(&vattr, 0, sizeof vattr);
+
+ vattr.va_mask = XFS_AT_XFLAGS;
+ error = xfs_getattr(XFS_I(inode), &vattr, 0);
+
+ if (error)
+ return error;
+ oldflags = vattr.va_xflags;
+ newflags = oldflags & ~(XFS_XFLAG_IMMUTABLE |
+ XFS_XFLAG_IUNLINK | XFS_XFLAG_BARRIER);
+
+ if (IS_IMMUTABLE(inode))
+ newflags |= XFS_XFLAG_IMMUTABLE;
+ if (IS_IUNLINK(inode))
+ newflags |= XFS_XFLAG_IUNLINK;
+ if (IS_BARRIER(inode))
+ newflags |= XFS_XFLAG_BARRIER;
+
+ if (oldflags ^ newflags) {
+ vattr.va_xflags = newflags;
+ vattr.va_mask |= XFS_AT_XFLAGS;
+ error = xfs_setattr(XFS_I(inode), &vattr, flags, NULL);
+ }
+ vn_revalidate(vp);
+ return error;
+}
+
+STATIC int
xfs_vn_setxattr(
struct dentry *dentry,
const char *name,
@@ -881,6 +927,7 @@ const struct inode_operations xfs_dir_in
.getxattr = xfs_vn_getxattr,
.listxattr = xfs_vn_listxattr,
.removexattr = xfs_vn_removexattr,
+ .sync_flags = xfs_vn_sync_flags,
};
const struct inode_operations xfs_symlink_inode_operations = {
@@ -894,4 +942,5 @@ const struct inode_operations xfs_symlin
.getxattr = xfs_vn_getxattr,
.listxattr = xfs_vn_listxattr,
.removexattr = xfs_vn_removexattr,
+ .sync_flags = xfs_vn_sync_flags,
};
--- a/fs/xfs/linux-2.6/xfs_linux.h 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_linux.h 2008-04-19 15:14:52.000000000 -0400
@@ -128,6 +128,7 @@
#define current_pid() (current->pid)
#define current_fsuid(cred) (current->fsuid)
#define current_fsgid(cred) (current->fsgid)
+#define current_fstag(cred,vp) (dx_current_fstag(vn_to_inode(vp)->i_sb))
#define current_test_flags(f) (current->flags & (f))
#define current_set_flags_nested(sp, f) \
(*(sp) = current->flags, current->flags |= (f))
--- a/fs/xfs/linux-2.6/xfs_super.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_super.c 2008-04-26 09:51:47.000000000 -0400
@@ -137,6 +137,9 @@ xfs_args_allocate(
#define MNTOPT_DMAPI "dmapi" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_XDSM "xdsm" /* DMI enabled (DMAPI / XDSM) */
#define MNTOPT_DMI "dmi" /* DMI enabled (DMAPI / XDSM) */
+#define MNTOPT_TAGXID "tagxid" /* context tagging for inodes */
+#define MNTOPT_TAGGED "tag" /* context tagging for inodes */
+#define MNTOPT_NOTAGTAG "notag" /* do not use context tagging */
STATIC unsigned long
suffix_strtoul(char *s, char **endp, unsigned int base)
@@ -355,6 +358,19 @@ xfs_parseargs(
} else if (!strcmp(this_char, "irixsgid")) {
cmn_err(CE_WARN,
"XFS: irixsgid is now a sysctl(2) variable, option is deprecated.");
+#ifndef CONFIG_TAGGING_NONE
+ } else if (!strcmp(this_char, MNTOPT_TAGGED)) {
+ args->flags2 |= XFSMNT2_TAGGED;
+ } else if (!strcmp(this_char, MNTOPT_NOTAGTAG)) {
+ args->flags2 &= ~XFSMNT2_TAGGED;
+ } else if (!strcmp(this_char, MNTOPT_TAGXID)) {
+ args->flags2 |= XFSMNT2_TAGGED;
+#endif
+#ifdef CONFIG_PROPAGATE
+ } else if (!strcmp(this_char, MNTOPT_TAGGED)) {
+ /* use value */
+ args->flags2 |= XFSMNT2_TAGGED;
+#endif
} else {
cmn_err(CE_WARN,
"XFS: unknown mount option [%s].", this_char);
@@ -591,6 +607,7 @@ xfs_revalidate_inode(
inode->i_nlink = ip->i_d.di_nlink;
inode->i_uid = ip->i_d.di_uid;
inode->i_gid = ip->i_d.di_gid;
+ inode->i_tag = ip->i_d.di_tag;
switch (inode->i_mode & S_IFMT) {
case S_IFBLK:
@@ -612,6 +629,7 @@ xfs_revalidate_inode(
inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
inode->i_ctime.tv_nsec = ip->i_d.di_ctime.t_nsec;
+
if (ip->i_d.di_flags & XFS_DIFLAG_IMMUTABLE)
inode->i_flags |= S_IMMUTABLE;
else
@@ -628,6 +646,15 @@ xfs_revalidate_inode(
inode->i_flags |= S_NOATIME;
else
inode->i_flags &= ~S_NOATIME;
+
+ if (ip->i_d.di_vflags & XFS_DIVFLAG_IUNLINK)
+ inode->i_flags |= S_IUNLINK;
+ else
+ inode->i_flags &= ~S_IUNLINK;
+ if (ip->i_d.di_vflags & XFS_DIVFLAG_BARRIER)
+ inode->i_flags |= S_BARRIER;
+ else
+ inode->i_flags &= ~S_BARRIER;
xfs_iflags_clear(ip, XFS_IMODIFIED);
}
@@ -1220,6 +1247,12 @@ xfs_fs_remount(
int error;
error = xfs_parseargs(mp, options, args, 1);
+ if ((args->flags2 & XFSMNT2_TAGGED) &&
+ !(sb->s_flags & MS_TAGGED)) {
+ printk("XFS: %s: tagging not permitted on remount.\n",
+ sb->s_id);
+ error = EINVAL;
+ }
if (!error)
error = xfs_mntupdate(mp, flags, args);
kmem_free(args, sizeof(*args));
@@ -1336,6 +1369,9 @@ xfs_fs_fill_super(
if (error)
goto fail_vfsop;
+ if (mp->m_flags & XFS_MOUNT_TAGGED)
+ sb->s_flags |= MS_TAGGED;
+
sb->s_dirt = 1;
sb->s_magic = XFS_SB_MAGIC;
sb->s_blocksize = mp->m_sb.sb_blocksize;
--- a/fs/xfs/linux-2.6/xfs_vnode.c 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_vnode.c 2008-04-19 17:04:58.000000000 -0400
@@ -105,6 +105,7 @@ vn_revalidate(
inode->i_mode = ip->i_d.di_mode;
inode->i_uid = ip->i_d.di_uid;
inode->i_gid = ip->i_d.di_gid;
+ inode->i_tag = ip->i_d.di_tag;
inode->i_mtime.tv_sec = ip->i_d.di_mtime.t_sec;
inode->i_mtime.tv_nsec = ip->i_d.di_mtime.t_nsec;
inode->i_ctime.tv_sec = ip->i_d.di_ctime.t_sec;
@@ -115,6 +116,14 @@ vn_revalidate(
inode->i_flags |= S_IMMUTABLE;
else
inode->i_flags &= ~S_IMMUTABLE;
+ if (xflags & XFS_XFLAG_IUNLINK)
+ inode->i_flags |= S_IUNLINK;
+ else
+ inode->i_flags &= ~S_IUNLINK;
+ if (xflags & XFS_XFLAG_BARRIER)
+ inode->i_flags |= S_BARRIER;
+ else
+ inode->i_flags &= ~S_BARRIER;
if (xflags & XFS_XFLAG_APPEND)
inode->i_flags |= S_APPEND;
else
--- a/fs/xfs/linux-2.6/xfs_vnode.h 2008-04-17 12:05:41.000000000 -0400
+++ a/fs/xfs/linux-2.6/xfs_vnode.h 2008-04-19 15:14:52.000000000 -0400
@@ -99,6 +99,7 @@ typedef struct bhv_vattr {
xfs_nlink_t va_nlink; /* number of references to file */
uid_t va_uid; /* owner user id */
gid_t va_gid; /* owner group id */
+ tag_t va_tag; /* owner context tag */
xfs_ino_t va_nodeid; /* file id */
xfs_off_t va_size; /* file size in bytes */
u_long va_blocksize; /* blocksize preferred for i/o */
@@ -147,13 +148,15 @@ typedef struct bhv_vattr {
#define XFS_AT_PROJID 0x04000000
#define XFS_AT_SIZE_NOPERM 0x08000000
#define XFS_AT_GENCOUNT 0x10000000
+#define XFS_AT_TAG 0x20000000
#define XFS_AT_ALL (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
XFS_AT_ATIME|XFS_AT_MTIME|XFS_AT_CTIME|XFS_AT_RDEV|\
XFS_AT_BLKSIZE|XFS_AT_NBLOCKS|XFS_AT_VCODE|XFS_AT_MAC|\
XFS_AT_ACL|XFS_AT_CAP|XFS_AT_INF|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|\
- XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT)
+ XFS_AT_NEXTENTS|XFS_AT_ANEXTENTS|XFS_AT_PROJID|XFS_AT_GENCOUNT|\
+ XFS_AT_TAG)
#define XFS_AT_STAT (XFS_AT_TYPE|XFS_AT_MODE|XFS_AT_UID|XFS_AT_GID|\
XFS_AT_FSID|XFS_AT_NODEID|XFS_AT_NLINK|XFS_AT_SIZE|\
--- a/fs/xfs/quota/xfs_qm_syscalls.c 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/quota/xfs_qm_syscalls.c 2008-04-19 15:14:52.000000000 -0400
@@ -17,6 +17,7 @@
*/
#include <linux/capability.h>
+#include <linux/vs_context.h>
#include "xfs.h"
#include "xfs_fs.h"
@@ -205,7 +206,7 @@ xfs_qm_scall_quotaoff(
xfs_qoff_logitem_t *qoffstart;
int nculprits;
- if (!force && !capable(CAP_SYS_ADMIN))
+ if (!force && !vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return XFS_ERROR(EPERM);
/*
* No file system can have quotas enabled on disk but not in core.
@@ -374,7 +375,7 @@ xfs_qm_scall_trunc_qfiles(
int error;
xfs_inode_t *qip;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return XFS_ERROR(EPERM);
error = 0;
if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
@@ -418,7 +419,7 @@ xfs_qm_scall_quotaon(
uint accflags;
__int64_t sbflags;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return XFS_ERROR(EPERM);
flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);
@@ -587,7 +588,7 @@ xfs_qm_scall_setqlim(
int error;
xfs_qcnt_t hard, soft;
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_QUOTA_CTL))
return XFS_ERROR(EPERM);
if ((newlim->d_fieldmask &
--- a/fs/xfs/xfs_clnt.h 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_clnt.h 2008-04-19 15:14:52.000000000 -0400
@@ -100,5 +100,6 @@ struct xfs_mount_args {
* I/O size in stat(2) */
#define XFSMNT2_FILESTREAMS 0x00000002 /* enable the filestreams
* allocator */
+#define XFSMNT2_TAGGED 0x80000000 /* context tagging */
#endif /* __XFS_CLNT_H__ */
--- a/fs/xfs/xfs_dinode.h 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_dinode.h 2008-04-27 10:33:37.000000000 -0400
@@ -53,7 +53,9 @@ typedef struct xfs_dinode_core {
__be32 di_gid; /* owner's group id */
__be32 di_nlink; /* number of links to file */
__be16 di_projid; /* owner's project id */
- __u8 di_pad[8]; /* unused, zeroed space */
+ __be16 di_tag; /* context tagging */
+ __be16 di_vflags; /* vserver specific flags */
+ __u8 di_pad[4]; /* unused, zeroed space */
__be16 di_flushiter; /* incremented on flush */
xfs_timestamp_t di_atime; /* time last accessed */
xfs_timestamp_t di_mtime; /* time last modified */
@@ -136,7 +138,9 @@ typedef struct xfs_dinode
#define XFS_DI_NEXT_UNLINKED 0x1000000
#define XFS_DI_U 0x2000000
#define XFS_DI_A 0x4000000
-#define XFS_DI_NUM_BITS 27
+#define XFS_DI_VFLAGS 0x8000000
+#define XFS_DI_TAG 0x10000000
+#define XFS_DI_NUM_BITS 29
#define XFS_DI_ALL_BITS ((1 << XFS_DI_NUM_BITS) - 1)
#define XFS_DI_CORE_BITS (XFS_DI_ALL_BITS & ~(XFS_DI_U|XFS_DI_A))
@@ -223,6 +227,7 @@ typedef enum xfs_dinode_fmt
#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
+
#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
@@ -252,4 +257,7 @@ typedef enum xfs_dinode_fmt
XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
+#define XFS_DIVFLAG_BARRIER 0x01
+#define XFS_DIVFLAG_IUNLINK 0x02
+
#endif /* __XFS_DINODE_H__ */
--- a/fs/xfs/xfs_fs.h 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -67,6 +67,8 @@ struct fsxattr {
#define XFS_XFLAG_EXTSZINHERIT 0x00001000 /* inherit inode extent size */
#define XFS_XFLAG_NODEFRAG 0x00002000 /* do not defragment */
#define XFS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
+#define XFS_XFLAG_BARRIER 0x10000000 /* chroot() barrier */
+#define XFS_XFLAG_IUNLINK 0x20000000 /* immutable unlink */
#define XFS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/*
@@ -296,7 +298,8 @@ typedef struct xfs_bstat {
__s32 bs_extents; /* number of extents */
__u32 bs_gen; /* generation count */
__u16 bs_projid; /* project id */
- unsigned char bs_pad[14]; /* pad space, unused */
+ __u16 bs_tag; /* context tagging */
+ unsigned char bs_pad[12]; /* pad space, unused */
__u32 bs_dmevmask; /* DMIG event mask */
__u16 bs_dmstate; /* DMIG state info */
__u16 bs_aextents; /* attribute number of extents */
--- a/fs/xfs/xfs_ialloc.c 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_ialloc.c 2008-04-27 10:30:39.000000000 -0400
@@ -66,6 +66,8 @@ xfs_ialloc_log_di(
offsetof(xfs_dinode_core_t, di_gid),
offsetof(xfs_dinode_core_t, di_nlink),
offsetof(xfs_dinode_core_t, di_projid),
+ offsetof(xfs_dinode_core_t, di_tag),
+ offsetof(xfs_dinode_core_t, di_vflags),
offsetof(xfs_dinode_core_t, di_pad),
offsetof(xfs_dinode_core_t, di_atime),
offsetof(xfs_dinode_core_t, di_mtime),
--- a/fs/xfs/xfs_inode.c 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_inode.c 2008-04-26 11:06:34.000000000 -0400
@@ -219,6 +219,7 @@ xfs_inotobp(
return 0;
}
+#include <linux/vs_tag.h>
/*
* This routine is called to map an inode to the buffer containing
@@ -716,13 +717,21 @@ xfs_dinode_from_disk(
xfs_icdinode_t *to,
xfs_dinode_core_t *from)
{
+ uint32_t uid, gid;
+
to->di_magic = be16_to_cpu(from->di_magic);
to->di_mode = be16_to_cpu(from->di_mode);
to->di_version = from ->di_version;
to->di_format = from->di_format;
to->di_onlink = be16_to_cpu(from->di_onlink);
- to->di_uid = be32_to_cpu(from->di_uid);
- to->di_gid = be32_to_cpu(from->di_gid);
+
+ uid = be32_to_cpu(from->di_uid);
+ gid = be32_to_cpu(from->di_gid);
+
+ to->di_uid = INOTAG_UID(1, uid, gid);
+ to->di_gid = INOTAG_GID(1, uid, gid);
+ to->di_tag = INOTAG_TAG(1, uid, gid, 0);
+
to->di_nlink = be32_to_cpu(from->di_nlink);
to->di_projid = be16_to_cpu(from->di_projid);
memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
@@ -756,8 +765,10 @@ xfs_dinode_to_disk(
to->di_version = from ->di_version;
to->di_format = from->di_format;
to->di_onlink = cpu_to_be16(from->di_onlink);
- to->di_uid = cpu_to_be32(from->di_uid);
- to->di_gid = cpu_to_be32(from->di_gid);
+
+ to->di_uid = cpu_to_be32(TAGINO_UID(1, from->di_uid, from->di_tag));
+ to->di_gid = cpu_to_be32(TAGINO_GID(1, from->di_gid, from->di_tag));
+
to->di_nlink = cpu_to_be32(from->di_nlink);
to->di_projid = cpu_to_be16(from->di_projid);
memcpy(to->di_pad, from->di_pad, sizeof(to->di_pad));
@@ -783,7 +794,8 @@ xfs_dinode_to_disk(
STATIC uint
_xfs_dic2xflags(
- __uint16_t di_flags)
+ __uint16_t di_flags,
+ __uint16_t di_vflags)
{
uint flags = 0;
@@ -817,7 +829,10 @@ _xfs_dic2xflags(
if (di_flags & XFS_DIFLAG_FILESTREAM)
flags |= XFS_XFLAG_FILESTREAM;
}
-
+ if (di_vflags & XFS_DIVFLAG_IUNLINK)
+ flags |= XFS_XFLAG_IUNLINK;
+ if (di_vflags & XFS_DIVFLAG_BARRIER)
+ flags |= XFS_XFLAG_BARRIER;
return flags;
}
@@ -827,7 +842,7 @@ xfs_ip2xflags(
{
xfs_icdinode_t *dic = &ip->i_d;
- return _xfs_dic2xflags(dic->di_flags) |
+ return _xfs_dic2xflags(dic->di_flags, dic->di_vflags) |
(XFS_IFORK_Q(ip) ? XFS_XFLAG_HASATTR : 0);
}
@@ -837,7 +852,7 @@ xfs_dic2xflags(
{
xfs_dinode_core_t *dic = &dip->di_core;
- return _xfs_dic2xflags(be16_to_cpu(dic->di_flags)) |
+ return _xfs_dic2xflags(be16_to_cpu(dic->di_flags), be16_to_cpu(dic->di_vflags)) |
(XFS_DFORK_Q(dip) ? XFS_XFLAG_HASATTR : 0);
}
@@ -1138,6 +1153,7 @@ xfs_ialloc(
ASSERT(ip->i_d.di_nlink == nlink);
ip->i_d.di_uid = current_fsuid(cr);
ip->i_d.di_gid = current_fsgid(cr);
+ ip->i_d.di_tag = current_fstag(cr, vp);
ip->i_d.di_projid = prid;
memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
--- a/fs/xfs/xfs_inode.h 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_inode.h 2008-04-19 15:14:52.000000000 -0400
@@ -187,7 +187,9 @@ typedef struct xfs_icdinode {
__uint32_t di_gid; /* owner's group id */
__uint32_t di_nlink; /* number of links to file */
__uint16_t di_projid; /* owner's project id */
- __uint8_t di_pad[8]; /* unused, zeroed space */
+ __uint16_t di_tag; /* context tagging */
+ __uint16_t di_vflags; /* vserver specific flags */
+ __uint8_t di_pad[4]; /* unused, zeroed space */
__uint16_t di_flushiter; /* incremented on flush */
xfs_ictimestamp_t di_atime; /* time last accessed */
xfs_ictimestamp_t di_mtime; /* time last modified */
--- a/fs/xfs/xfs_itable.c 2008-04-17 12:05:42.000000000 -0400
+++ a/fs/xfs/xfs_itable.c 2008-04-19 15:14:52.000000000 -0400
@@ -89,6 +89,7 @@ xfs_bulkstat_one_iget(
buf->bs_mode = dic->di_mode;
buf->bs_uid = dic->di_uid;
buf->bs_gid = dic->di_gid;
+ buf->bs_tag = dic->di_tag;
buf->bs_size = dic->di_size;
vn_atime_to_bstime(vp, &buf->bs_atime);
buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
--- a/fs/xfs/xfs_mount.h 2008-04-17 12:05:43.000000000 -0400
+++ a/fs/xfs/xfs_mount.h 2008-04-19 15:14:52.000000000 -0400
@@ -378,6 +378,7 @@ typedef struct xfs_mount {
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
+#define XFS_MOUNT_TAGGED (1ULL << 31) /* context tagging */
/*
* Default minimum read and write sizes.
--- a/fs/xfs/xfs_vfsops.c 2008-04-17 12:05:43.000000000 -0400
+++ a/fs/xfs/xfs_vfsops.c 2008-04-26 09:52:49.000000000 -0400
@@ -290,6 +290,8 @@ xfs_start_flags(
if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
+ if (ap->flags2 & XFSMNT2_TAGGED)
+ mp->m_flags |= XFS_MOUNT_TAGGED;
/*
* no recovery flag requires a read-only mount
@@ -402,7 +404,6 @@ xfs_finish_flags(
if (ap->flags & XFSMNT_PQUOTAENF)
mp->m_qflags |= XFS_OQUOTA_ENFD;
}
-
return 0;
}
--- a/fs/xfs/xfs_vnodeops.c 2008-04-17 12:05:43.000000000 -0400
+++ a/fs/xfs/xfs_vnodeops.c 2008-04-19 15:14:52.000000000 -0400
@@ -122,6 +122,7 @@ xfs_getattr(
vap->va_mode = ip->i_d.di_mode;
vap->va_uid = ip->i_d.di_uid;
vap->va_gid = ip->i_d.di_gid;
+ vap->va_tag = ip->i_d.di_tag;
vap->va_projid = ip->i_d.di_projid;
/*
@@ -221,6 +222,7 @@ xfs_setattr(
uint commit_flags=0;
uid_t uid=0, iuid=0;
gid_t gid=0, igid=0;
+ tag_t tag=0, itag=0;
int timeflags = 0;
xfs_prid_t projid=0, iprojid=0;
int mandlock_before, mandlock_after;
@@ -272,6 +274,7 @@ xfs_setattr(
(mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID))) {
uint qflags = 0;
+ /* TODO: handle tagging? */
if ((mask & XFS_AT_UID) && XFS_IS_UQUOTA_ON(mp)) {
uid = vap->va_uid;
qflags |= XFS_QMOPT_UQUOTA;
@@ -351,6 +354,8 @@ xfs_setattr(
if (mask &
(XFS_AT_MODE|XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_UID|
XFS_AT_GID|XFS_AT_PROJID)) {
+ /* TODO: handle tagging? */
+
/*
* CAP_FOWNER overrides the following restrictions:
*
@@ -399,7 +404,7 @@ xfs_setattr(
* and can change the group id only to a group of which he
* or she is a member.
*/
- if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
+ if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_TAG|XFS_AT_PROJID)) {
/*
* These IDs could have changed since we last looked at them.
* But, we're assured that if the ownership did change
@@ -407,10 +412,12 @@ xfs_setattr(
* would have changed also.
*/
iuid = ip->i_d.di_uid;
- iprojid = ip->i_d.di_projid;
igid = ip->i_d.di_gid;
- gid = (mask & XFS_AT_GID) ? vap->va_gid : igid;
+ itag = ip->i_d.di_tag;
+ iprojid = ip->i_d.di_projid;
uid = (mask & XFS_AT_UID) ? vap->va_uid : iuid;
+ gid = (mask & XFS_AT_GID) ? vap->va_gid : igid;
+ tag = (mask & XFS_AT_TAG) ? vap->va_tag : itag;
projid = (mask & XFS_AT_PROJID) ? (xfs_prid_t)vap->va_projid :
iprojid;
@@ -438,6 +445,7 @@ xfs_setattr(
if ((XFS_IS_UQUOTA_ON(mp) && iuid != uid) ||
(XFS_IS_PQUOTA_ON(mp) && iprojid != projid) ||
(XFS_IS_GQUOTA_ON(mp) && igid != gid)) {
+ /* TODO: handle tagging? */
ASSERT(tp);
code = XFS_QM_DQVOPCHOWNRESV(mp, tp, ip, udqp, gdqp,
capable(CAP_FOWNER) ?
@@ -686,7 +694,7 @@ xfs_setattr(
* and can change the group id only to a group of which he
* or she is a member.
*/
- if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_PROJID)) {
+ if (mask & (XFS_AT_UID|XFS_AT_GID|XFS_AT_TAG|XFS_AT_PROJID)) {
/*
* CAP_FSETID overrides the following restrictions:
*
@@ -702,6 +710,9 @@ xfs_setattr(
* Change the ownerships and register quota modifications
* in the transaction.
*/
+ if (itag != tag) {
+ ip->i_d.di_tag = tag;
+ }
if (iuid != uid) {
if (XFS_IS_UQUOTA_ON(mp)) {
ASSERT(mask & XFS_AT_UID);
@@ -777,6 +788,7 @@ xfs_setattr(
}
if (mask & XFS_AT_XFLAGS) {
uint di_flags;
+ uint di_vflags = 0;
/* can't set PREALLOC this way, just preserve it */
di_flags = (ip->i_d.di_flags & XFS_DIFLAG_PREALLOC);
@@ -810,6 +822,11 @@ xfs_setattr(
di_flags |= XFS_DIFLAG_EXTSIZE;
}
ip->i_d.di_flags = di_flags;
+ if (vap->va_xflags & XFS_XFLAG_IUNLINK)
+ di_vflags |= XFS_DIVFLAG_IUNLINK;
+ if (vap->va_xflags & XFS_XFLAG_BARRIER)
+ di_vflags |= XFS_DIVFLAG_BARRIER;
+ ip->i_d.di_vflags = di_vflags;
}
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
timeflags |= XFS_ICHGTIME_CHG;
--- a/include/asm-arm/tlb.h 2008-04-17 12:05:43.000000000 -0400
+++ a/include/asm-arm/tlb.h 2008-04-19 15:14:52.000000000 -0400
@@ -28,6 +28,7 @@
#else /* !CONFIG_MMU */
#include <asm/pgalloc.h>
+#include <linux/vs_memory.h>
/*
* TLB handling. This allows us to remove pages from the page
--- a/include/asm-blackfin/unistd.h 2008-04-17 12:05:43.000000000 -0400
+++ a/include/asm-blackfin/unistd.h 2008-04-19 15:14:52.000000000 -0400
@@ -278,7 +278,7 @@
#define __NR_tgkill 271
#define __NR_utimes 272
#define __NR_fadvise64_64 273
- /* 274 __NR_vserver */
+#define __NR_vserver 274
/* 275 __NR_mbind */
/* 276 __NR_get_mempolicy */
/* 277 __NR_set_mempolicy */
--- a/include/asm-generic/tlb.h 2008-04-17 12:05:43.000000000 -0400
+++ a/include/asm-generic/tlb.h 2008-04-19 15:50:15.000000000 -0400
@@ -14,6 +14,7 @@
#define _ASM_GENERIC__TLB_H
#include <linux/swap.h>
+#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
--- a/include/asm-ia64/tlb.h 2007-02-04 13:44:54.000000000 -0500
+++ a/include/asm-ia64/tlb.h 2008-04-19 15:14:52.000000000 -0400
@@ -40,6 +40,7 @@
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
+#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
--- a/include/asm-powerpc/systbl.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-powerpc/systbl.h 2008-04-19 15:14:52.000000000 -0400
@@ -260,7 +260,7 @@ COMPAT_SYS_SPU(fstatfs64)
SYSX(sys_ni_syscall, ppc_fadvise64_64, ppc_fadvise64_64)
PPC_SYS_SPU(rtas)
OLDSYS(debug_setcontext)
-SYSCALL(ni_syscall)
+SYSX(sys_vserver, sys32_vserver, sys_vserver)
COMPAT_SYS(migrate_pages)
COMPAT_SYS(mbind)
COMPAT_SYS(get_mempolicy)
--- a/include/asm-powerpc/unistd.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-powerpc/unistd.h 2008-04-19 15:14:52.000000000 -0400
@@ -275,7 +275,7 @@
#endif
#define __NR_rtas 255
#define __NR_sys_debug_setcontext 256
-/* Number 257 is reserved for vserver */
+#define __NR_vserver 257
#define __NR_migrate_pages 258
#define __NR_mbind 259
#define __NR_get_mempolicy 260
--- a/include/asm-s390/unistd.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-s390/unistd.h 2008-04-19 15:14:52.000000000 -0400
@@ -202,7 +202,7 @@
#define __NR_clock_gettime (__NR_timer_create+6)
#define __NR_clock_getres (__NR_timer_create+7)
#define __NR_clock_nanosleep (__NR_timer_create+8)
-/* Number 263 is reserved for vserver */
+#define __NR_vserver 263
#define __NR_statfs64 265
#define __NR_fstatfs64 266
#define __NR_remap_file_pages 267
--- a/include/asm-sparc/unistd.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-sparc/unistd.h 2008-04-19 15:14:52.000000000 -0400
@@ -282,7 +282,7 @@
#define __NR_timer_getoverrun 264
#define __NR_timer_delete 265
#define __NR_timer_create 266
-/* #define __NR_vserver 267 Reserved for VSERVER */
+#define __NR_vserver 267
#define __NR_io_setup 268
#define __NR_io_destroy 269
#define __NR_io_submit 270
--- a/include/asm-sparc64/tlb.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-sparc64/tlb.h 2008-04-19 15:14:52.000000000 -0400
@@ -3,6 +3,7 @@
#include <linux/swap.h>
#include <linux/pagemap.h>
+#include <linux/vs_memory.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
--- a/include/asm-sparc64/unistd.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-sparc64/unistd.h 2008-04-19 15:14:52.000000000 -0400
@@ -284,7 +284,7 @@
#define __NR_timer_getoverrun 264
#define __NR_timer_delete 265
#define __NR_timer_create 266
-/* #define __NR_vserver 267 Reserved for VSERVER */
+#define __NR_vserver 267
#define __NR_io_setup 268
#define __NR_io_destroy 269
#define __NR_io_submit 270
--- a/include/asm-x86/unistd_64.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/asm-x86/unistd_64.h 2008-04-19 15:14:52.000000000 -0400
@@ -535,7 +535,7 @@ __SYSCALL(__NR_tgkill, sys_tgkill)
#define __NR_utimes 235
__SYSCALL(__NR_utimes, sys_utimes)
#define __NR_vserver 236
-__SYSCALL(__NR_vserver, sys_ni_syscall)
+__SYSCALL(__NR_vserver, sys_vserver)
#define __NR_mbind 237
__SYSCALL(__NR_mbind, sys_mbind)
#define __NR_set_mempolicy 238
--- a/include/linux/capability.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/capability.h 2008-04-22 11:01:12.000000000 -0400
@@ -260,6 +260,7 @@ typedef struct kernel_cap_struct {
arbitrary SCSI commands */
/* Allow setting encryption key on loopback filesystem */
/* Allow setting zone reclaim policy */
+/* Allow the selection of a security context */
#define CAP_SYS_ADMIN 21
@@ -332,7 +333,13 @@ typedef struct kernel_cap_struct {
#define CAP_MAC_ADMIN 33
-#define CAP_LAST_CAP CAP_MAC_ADMIN
+/* Allow context manipulations */
+/* Allow changing context info on files */
+
+#define CAP_CONTEXT 34
+
+
+#define CAP_LAST_CAP CAP_CONTEXT
#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)
--- a/include/linux/devpts_fs.h 2007-02-04 13:44:54.000000000 -0500
+++ a/include/linux/devpts_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -30,5 +30,4 @@ static inline void devpts_pty_kill(int n
#endif
-
#endif /* _LINUX_DEVPTS_FS_H */
--- a/include/linux/ext2_fs.h 2008-04-17 11:31:39.000000000 -0400
+++ a/include/linux/ext2_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -189,6 +189,8 @@ struct ext2_group_desc
#define EXT2_NOTAIL_FL FS_NOTAIL_FL /* file tail should not be merged */
#define EXT2_DIRSYNC_FL FS_DIRSYNC_FL /* dirsync behaviour (directories only) */
#define EXT2_TOPDIR_FL FS_TOPDIR_FL /* Top of directory hierarchies*/
+#define EXT2_BARRIER_FL FS_BARRIER_FL /* Barrier for chroot() */
+#define EXT2_IUNLINK_FL FS_IUNLINK_FL /* Immutable unlink */
#define EXT2_RESERVED_FL FS_RESERVED_FL /* reserved for ext2 lib */
#define EXT2_FL_USER_VISIBLE FS_FL_USER_VISIBLE /* User visible flags */
@@ -247,7 +249,7 @@ struct ext2_inode {
struct {
__u8 l_i_frag; /* Fragment number */
__u8 l_i_fsize; /* Fragment size */
- __u16 i_pad1;
+ __u16 l_i_tag; /* Context Tag */
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
__u32 l_i_reserved2;
@@ -279,6 +281,7 @@ struct ext2_inode {
#define i_gid_low i_gid
#define i_uid_high osd2.linux2.l_i_uid_high
#define i_gid_high osd2.linux2.l_i_gid_high
+#define i_raw_tag osd2.linux2.l_i_tag
#define i_reserved2 osd2.linux2.l_i_reserved2
#endif
@@ -323,6 +326,7 @@ struct ext2_inode {
#define EXT2_MOUNT_USRQUOTA 0x020000 /* user quota */
#define EXT2_MOUNT_GRPQUOTA 0x040000 /* group quota */
#define EXT2_MOUNT_RESERVATION 0x080000 /* Preallocation */
+#define EXT2_MOUNT_TAGGED (1<<24) /* Enable Context Tags */
#define clear_opt(o, opt) o &= ~EXT2_MOUNT_##opt
--- a/include/linux/ext3_fs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/ext3_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -173,6 +173,8 @@ struct ext3_group_desc
#define EXT3_NOTAIL_FL 0x00008000 /* file tail should not be merged */
#define EXT3_DIRSYNC_FL 0x00010000 /* dirsync behaviour (directories only) */
#define EXT3_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
+#define EXT3_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+#define EXT3_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT3_RESERVED_FL 0x80000000 /* reserved for ext3 lib */
#define EXT3_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
@@ -292,7 +294,7 @@ struct ext3_inode {
struct {
__u8 l_i_frag; /* Fragment number */
__u8 l_i_fsize; /* Fragment size */
- __u16 i_pad1;
+ __u16 l_i_tag; /* Context Tag */
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
__u32 l_i_reserved2;
@@ -326,6 +328,7 @@ struct ext3_inode {
#define i_gid_low i_gid
#define i_uid_high osd2.linux2.l_i_uid_high
#define i_gid_high osd2.linux2.l_i_gid_high
+#define i_raw_tag osd2.linux2.l_i_tag
#define i_reserved2 osd2.linux2.l_i_reserved2
#elif defined(__GNU__)
@@ -380,6 +383,7 @@ struct ext3_inode {
#define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
#define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
#define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
+#define EXT3_MOUNT_TAGGED (1<<24) /* Enable Context Tags */
/* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
#ifndef _LINUX_EXT2_FS_H
@@ -822,6 +826,7 @@ struct buffer_head * ext3_bread (handle_
int ext3_get_blocks_handle(handle_t *handle, struct inode *inode,
sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result,
int create, int extend_disksize);
+extern int ext3_sync_flags(struct inode *inode);
extern struct inode *ext3_iget(struct super_block *, unsigned long);
extern int ext3_write_inode (struct inode *, int);
--- a/include/linux/ext4_fs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/ext4_fs.h 2008-04-19 15:43:04.000000000 -0400
@@ -231,6 +231,8 @@ struct ext4_group_desc
#define EXT4_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define EXT4_HUGE_FILE_FL 0x00040000 /* Set to each huge file */
#define EXT4_EXTENTS_FL 0x00080000 /* Inode uses extents */
+#define EXT4_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+#define EXT4_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define EXT4_RESERVED_FL 0x80000000 /* reserved for ext4 lib */
#define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
@@ -359,7 +361,8 @@ struct ext4_inode {
__le16 l_i_file_acl_high;
__le16 l_i_uid_high; /* these 2 fields */
__le16 l_i_gid_high; /* were reserved2[0] */
- __u32 l_i_reserved2;
+ __u16 l_i_tag; /* Context Tag */
+ __u16 l_i_reserved2;
} linux2;
struct {
__le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
@@ -465,6 +468,7 @@ do { \
#define i_gid_low i_gid
#define i_uid_high osd2.linux2.l_i_uid_high
#define i_gid_high osd2.linux2.l_i_gid_high
+#define i_raw_tag osd2.linux2.l_i_tag
#define i_reserved2 osd2.linux2.l_i_reserved2
#elif defined(__GNU__)
@@ -528,6 +532,7 @@ do { \
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
#define EXT4_MOUNT_I_VERSION 0x2000000 /* i_version support */
#define EXT4_MOUNT_MBALLOC 0x4000000 /* Buddy allocation support */
+#define EXT4_MOUNT_TAGGED 0x8000000 /* Enable Context Tags */
/* Compatibility, for having both ext2_fs.h and ext4_fs.h included at once */
#ifndef _LINUX_EXT2_FS_H
#define clear_opt(o, opt) o &= ~EXT4_MOUNT_##opt
@@ -1030,6 +1035,7 @@ int ext4_get_blocks_handle(handle_t *han
ext4_lblk_t iblock, unsigned long maxblocks,
struct buffer_head *bh_result,
int create, int extend_disksize);
+extern int ext4_sync_flags(struct inode *inode);
extern struct inode *ext4_iget(struct super_block *, unsigned long);
extern int ext4_write_inode (struct inode *, int);
--- a/include/linux/fs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/fs.h 2008-04-19 15:37:28.000000000 -0400
@@ -125,6 +125,8 @@ extern int dir_notify_enable;
#define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
#define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
#define MS_I_VERSION (1<<23) /* Update inode I_version field */
+#define MS_TAGGED (1<<24) /* use generic inode tagging */
+#define MS_TAGID (1<<25) /* use specific tag for this mount */
#define MS_ACTIVE (1<<30)
#define MS_NOUSER (1<<31)
@@ -151,6 +153,8 @@ extern int dir_notify_enable;
#define S_NOCMTIME 128 /* Do not update file c/mtime */
#define S_SWAPFILE 256 /* Do not truncate: swapon got its bmaps */
#define S_PRIVATE 512 /* Inode is fs-internal */
+#define S_BARRIER 1024 /* Barrier for chroot() */
+#define S_IUNLINK 2048 /* Immutable unlink */
/*
* Note that nosuid etc flags are inode-specific: setting some file-system
@@ -167,25 +171,37 @@ extern int dir_notify_enable;
*/
#define __IS_FLG(inode,flg) ((inode)->i_sb->s_flags & (flg))
-#define IS_RDONLY(inode) ((inode)->i_sb->s_flags & MS_RDONLY)
+#define IS_RDONLY(inode) __IS_FLG(inode, MS_RDONLY)
#define IS_SYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS) || \
((inode)->i_flags & S_SYNC))
#define IS_DIRSYNC(inode) (__IS_FLG(inode, MS_SYNCHRONOUS|MS_DIRSYNC) || \
((inode)->i_flags & (S_SYNC|S_DIRSYNC)))
#define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
-#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
-#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+#define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+#define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
+#define IS_TAGGED(inode) __IS_FLG(inode, MS_TAGGED)
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+#define IS_IUNLINK(inode) ((inode)->i_flags & S_IUNLINK)
+#define IS_IXORUNLINK(inode) ((IS_IUNLINK(inode) ? S_IMMUTABLE : 0) ^ IS_IMMUTABLE(inode))
#define IS_POSIXACL(inode) __IS_FLG(inode, MS_POSIXACL)
+#define IS_BARRIER(inode) (S_ISDIR((inode)->i_mode) && ((inode)->i_flags & S_BARRIER))
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
#define IS_SWAPFILE(inode) ((inode)->i_flags & S_SWAPFILE)
#define IS_PRIVATE(inode) ((inode)->i_flags & S_PRIVATE)
+#ifdef CONFIG_VSERVER_COWBL
+# define IS_COW(inode) (IS_IUNLINK(inode) && IS_IMMUTABLE(inode))
+# define IS_COW_LINK(inode) (S_ISREG((inode)->i_mode) && ((inode)->i_nlink > 1))
+#else
+# define IS_COW(inode) (0)
+# define IS_COW_LINK(inode) (0)
+#endif
+
/* the read-only stuff doesn't really belong here, but any other place is
probably as bad and I don't want to create yet another include file. */
@@ -259,12 +275,13 @@ extern int dir_notify_enable;
#define FS_TOPDIR_FL 0x00020000 /* Top of directory hierarchies*/
#define FS_EXTENT_FL 0x00080000 /* Extents */
#define FS_DIRECTIO_FL 0x00100000 /* Use direct i/o */
+#define FS_BARRIER_FL 0x04000000 /* Barrier for chroot() */
+#define FS_IUNLINK_FL 0x08000000 /* Immutable unlink */
#define FS_RESERVED_FL 0x80000000 /* reserved for ext2 lib */
#define FS_FL_USER_VISIBLE 0x0003DFFF /* User visible flags */
#define FS_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
-
#define SYNC_FILE_RANGE_WAIT_BEFORE 1
#define SYNC_FILE_RANGE_WRITE 2
#define SYNC_FILE_RANGE_WAIT_AFTER 4
@@ -334,6 +351,7 @@ typedef void (dio_iodone_t)(struct kiocb
#define ATTR_FILE 8192
#define ATTR_KILL_PRIV 16384
#define ATTR_OPEN 32768 /* Truncating from open(O_TRUNC) */
+#define ATTR_TAG 65536
/*
* This is the Inode Attributes structure, used for notify_change(). It
@@ -349,6 +367,7 @@ struct iattr {
umode_t ia_mode;
uid_t ia_uid;
gid_t ia_gid;
+ tag_t ia_tag;
loff_t ia_size;
struct timespec ia_atime;
struct timespec ia_mtime;
@@ -362,6 +381,9 @@ struct iattr {
struct file *ia_file;
};
+#define ATTR_FLAG_BARRIER 512 /* Barrier for chroot() */
+#define ATTR_FLAG_IUNLINK 1024 /* Immutable unlink */
+
/*
* Includes for diskquotas.
*/
@@ -600,7 +622,9 @@ struct inode {
unsigned int i_nlink;
uid_t i_uid;
gid_t i_gid;
+ tag_t i_tag;
dev_t i_rdev;
+ dev_t i_mdev;
u64 i_version;
loff_t i_size;
#ifdef __NEED_I_SIZE_ORDERED
@@ -735,12 +759,12 @@ static inline void i_size_write(struct i
static inline unsigned iminor(const struct inode *inode)
{
- return MINOR(inode->i_rdev);
+ return MINOR(inode->i_mdev);
}
static inline unsigned imajor(const struct inode *inode)
{
- return MAJOR(inode->i_rdev);
+ return MAJOR(inode->i_mdev);
}
extern struct block_device *I_BDEV(struct inode *inode);
@@ -795,6 +819,7 @@ struct file {
loff_t f_pos;
struct fown_struct f_owner;
unsigned int f_uid, f_gid;
+ xid_t f_xid;
struct file_ra_state f_ra;
u64 f_version;
@@ -879,6 +904,7 @@ struct file_lock {
unsigned char fl_type;
loff_t fl_start;
loff_t fl_end;
+ xid_t fl_xid;
struct fasync_struct * fl_fasync; /* for lease break notifications */
unsigned long fl_break_time; /* for nonblocking lease breaks */
@@ -1076,12 +1102,12 @@ extern void unlock_super(struct super_bl
*/
extern int vfs_permission(struct nameidata *, int);
extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
-extern int vfs_mkdir(struct inode *, struct dentry *, int);
-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
-extern int vfs_symlink(struct inode *, struct dentry *, const char *, int);
-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
-extern int vfs_rmdir(struct inode *, struct dentry *);
-extern int vfs_unlink(struct inode *, struct dentry *);
+extern int vfs_mkdir(struct inode *, struct dentry *, int, struct nameidata *);
+extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t, struct nameidata *);
+extern int vfs_symlink(struct inode *, struct dentry *, const char *, int, struct nameidata *);
+extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct nameidata *);
+extern int vfs_rmdir(struct inode *, struct dentry *, struct nameidata *);
+extern int vfs_unlink(struct inode *, struct dentry *, struct nameidata *);
extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
/*
@@ -1223,6 +1249,7 @@ struct inode_operations {
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset,
loff_t len);
+ int (*sync_flags) (struct inode *);
};
struct seq_file;
@@ -1238,6 +1265,7 @@ extern ssize_t vfs_readv(struct file *,
unsigned long, loff_t *);
extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
unsigned long, loff_t *);
+ssize_t vfs_sendfile(struct file *, struct file *, loff_t *, size_t, loff_t);
/*
* NOTE: write_inode, delete_inode, clear_inode, put_inode can be called
@@ -1934,6 +1962,7 @@ extern int dcache_dir_open(struct inode
extern int dcache_dir_close(struct inode *, struct file *);
extern loff_t dcache_dir_lseek(struct file *, loff_t, int);
extern int dcache_readdir(struct file *, void *, filldir_t);
+extern int dcache_readdir_filter(struct file *, void *, filldir_t, int (*)(struct dentry *));
extern int simple_getattr(struct vfsmount *, struct dentry *, struct kstat *);
extern int simple_statfs(struct dentry *, struct kstatfs *);
extern int simple_link(struct dentry *, struct inode *, struct dentry *);
--- a/include/linux/if_tun.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/if_tun.h 2008-04-19 15:14:52.000000000 -0400
@@ -42,6 +42,7 @@
#define TUNSETOWNER _IOW('T', 204, int)
#define TUNSETLINK _IOW('T', 205, int)
#define TUNSETGROUP _IOW('T', 206, int)
+#define TUNSETNID _IOW('T', 215, int)
/* TUNSETIFF ifr flags */
#define IFF_TUN 0x0001
--- a/include/linux/init_task.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/init_task.h 2008-04-19 15:14:52.000000000 -0400
@@ -196,6 +196,10 @@ extern struct group_info init_groups;
INIT_IDS \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
+ .xid = 0, \
+ .vx_info = NULL, \
+ .nid = 0, \
+ .nx_info = NULL, \
}
--- a/include/linux/interrupt.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/interrupt.h 2008-04-19 15:14:52.000000000 -0400
@@ -8,8 +8,8 @@
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
-#include <linux/hardirq.h>
#include <linux/sched.h>
+#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <asm/atomic.h>
#include <asm/ptrace.h>
--- a/include/linux/ipc.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/ipc.h 2008-04-19 15:14:52.000000000 -0400
@@ -93,6 +93,7 @@ struct kern_ipc_perm
key_t key;
uid_t uid;
gid_t gid;
+ xid_t xid;
uid_t cuid;
gid_t cgid;
mode_t mode;
--- a/include/linux/Kbuild 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/Kbuild 2008-04-19 15:14:52.000000000 -0400
@@ -354,3 +354,6 @@ unifdef-y += xattr.h
unifdef-y += xfrm.h
objhdr-y += version.h
+
+header-y += vserver/
+
--- a/include/linux/loop.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/loop.h 2008-04-19 15:14:52.000000000 -0400
@@ -45,6 +45,7 @@ struct loop_device {
struct loop_func_table *lo_encryption;
__u32 lo_init[2];
uid_t lo_key_owner; /* Who set the key */
+ xid_t lo_xid;
int (*ioctl)(struct loop_device *, int cmd,
unsigned long arg);
--- a/include/linux/magic.h 2008-04-17 11:31:39.000000000 -0400
+++ a/include/linux/magic.h 2008-04-19 15:14:52.000000000 -0400
@@ -3,7 +3,7 @@
#define ADFS_SUPER_MAGIC 0xadf5
#define AFFS_SUPER_MAGIC 0xadff
-#define AFS_SUPER_MAGIC 0x5346414F
+#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
#define CODA_SUPER_MAGIC 0x73757245
#define EFS_SUPER_MAGIC 0x414A53
@@ -26,6 +26,7 @@
#define NFS_SUPER_MAGIC 0x6969
#define OPENPROM_SUPER_MAGIC 0x9fa1
#define PROC_SUPER_MAGIC 0x9fa0
+#define DEVPTS_SUPER_MAGIC 0x1cd1
#define QNX4_SUPER_MAGIC 0x002f /* qnx4 fs detection */
#define REISERFS_SUPER_MAGIC 0x52654973 /* used by gcc */
--- a/include/linux/major.h 2008-04-17 10:37:24.000000000 -0400
+++ a/include/linux/major.h 2008-04-19 15:14:52.000000000 -0400
@@ -15,6 +15,7 @@
#define HD_MAJOR IDE0_MAJOR
#define PTY_SLAVE_MAJOR 3
#define TTY_MAJOR 4
+#define VROOT_MAJOR 4
#define TTYAUX_MAJOR 5
#define LP_MAJOR 6
#define VCS_MAJOR 7
--- a/include/linux/mm_types.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/mm_types.h 2008-04-19 15:14:52.000000000 -0400
@@ -201,6 +201,7 @@ struct mm_struct {
/* Architecture-specific MM context */
mm_context_t context;
+ struct vx_info *mm_vx_info;
/* Swap token stuff */
/*
--- a/include/linux/mount.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/mount.h 2008-04-19 15:45:19.000000000 -0400
@@ -28,6 +28,9 @@ struct mnt_namespace;
#define MNT_NOATIME 0x08
#define MNT_NODIRATIME 0x10
#define MNT_RELATIME 0x20
+#define MNT_RDONLY 0x40
+
+#define MNT_IS_RDONLY(m) ((m) && ((m)->mnt_flags & MNT_RDONLY))
#define MNT_SHRINKABLE 0x100
@@ -35,6 +38,10 @@ struct mnt_namespace;
#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
#define MNT_PNODE_MASK 0x3000 /* propagation flag mask */
+#define MNT_TAGID 0x10000
+#define MNT_NOTAG 0x20000
+#define MNT_NOTAGCHECK 0x40000
+
struct vfsmount {
struct list_head mnt_hash;
struct vfsmount *mnt_parent; /* fs we are mounted on */
@@ -62,6 +69,7 @@ struct vfsmount {
int mnt_expiry_mark; /* true if marked for expiry */
int mnt_pinned;
int mnt_ghosts;
+ tag_t mnt_tag; /* tagging used for vfsmount */
};
static inline struct vfsmount *mntget(struct vfsmount *mnt)
--- a/include/linux/net.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/net.h 2008-04-19 15:14:52.000000000 -0400
@@ -65,6 +65,7 @@ typedef enum {
#define SOCK_NOSPACE 2
#define SOCK_PASSCRED 3
#define SOCK_PASSSEC 4
+#define SOCK_USER_SOCKET 5
#ifndef ARCH_HAS_SOCKET_TYPES
/**
--- a/include/linux/nfs_mount.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/nfs_mount.h 2008-04-19 15:14:52.000000000 -0400
@@ -63,6 +63,7 @@ struct nfs_mount_data {
#define NFS_MOUNT_SECFLAVOUR 0x2000 /* 5 */
#define NFS_MOUNT_NORDIRPLUS 0x4000 /* 5 */
#define NFS_MOUNT_UNSHARED 0x8000 /* 5 */
-#define NFS_MOUNT_FLAGMASK 0xFFFF
+#define NFS_MOUNT_TAGGED 0x10000 /* context tagging */
+#define NFS_MOUNT_FLAGMASK 0x1FFFF
#endif
--- a/include/linux/nsproxy.h 2008-04-17 11:31:39.000000000 -0400
+++ a/include/linux/nsproxy.h 2008-04-19 15:14:52.000000000 -0400
@@ -3,6 +3,7 @@
#include <linux/spinlock.h>
#include <linux/sched.h>
+#include <linux/vserver/debug.h>
struct mnt_namespace;
struct uts_namespace;
@@ -63,22 +64,33 @@ static inline struct nsproxy *task_nspro
}
int copy_namespaces(unsigned long flags, struct task_struct *tsk);
+struct nsproxy *copy_nsproxy(struct nsproxy *orig);
void exit_task_namespaces(struct task_struct *tsk);
void switch_task_namespaces(struct task_struct *tsk, struct nsproxy *new);
void free_nsproxy(struct nsproxy *ns);
int unshare_nsproxy_namespaces(unsigned long, struct nsproxy **,
struct fs_struct *);
-static inline void put_nsproxy(struct nsproxy *ns)
+#define get_nsproxy(n) __get_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __get_nsproxy(struct nsproxy *ns,
+ const char *_file, int _line)
{
- if (atomic_dec_and_test(&ns->count)) {
- free_nsproxy(ns);
- }
+ vxlprintk(VXD_CBIT(space, 0), "get_nsproxy(%p[%u])",
+ ns, atomic_read(&ns->count), _file, _line);
+ atomic_inc(&ns->count);
}
-static inline void get_nsproxy(struct nsproxy *ns)
+#define put_nsproxy(n) __put_nsproxy(n, __FILE__, __LINE__)
+
+static inline void __put_nsproxy(struct nsproxy *ns,
+ const char *_file, int _line)
{
- atomic_inc(&ns->count);
+ vxlprintk(VXD_CBIT(space, 0), "put_nsproxy(%p[%u])",
+ ns, atomic_read(&ns->count), _file, _line);
+ if (atomic_dec_and_test(&ns->count)) {
+ free_nsproxy(ns);
+ }
}
#ifdef CONFIG_CGROUP_NS
--- a/include/linux/pid.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/pid.h 2008-04-29 17:56:00.000000000 -0400
@@ -8,7 +8,8 @@ enum pid_type
PIDTYPE_PID,
PIDTYPE_PGID,
PIDTYPE_SID,
- PIDTYPE_MAX
+ PIDTYPE_MAX,
+ PIDTYPE_REALPID
};
/*
@@ -142,6 +143,7 @@ static inline pid_t pid_nr(struct pid *p
}
pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns);
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns);
pid_t pid_vnr(struct pid *pid);
#define do_each_pid_task(pid, type, task) \
--- a/include/linux/proc_fs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/proc_fs.h 2008-04-19 15:44:54.000000000 -0400
@@ -59,6 +59,7 @@ struct proc_dir_entry {
nlink_t nlink;
uid_t uid;
gid_t gid;
+ int vx_flags;
loff_t size;
const struct inode_operations *proc_iops;
/*
@@ -265,16 +266,23 @@ static inline void kclist_add(struct kco
extern void kclist_add(struct kcore_list *, void *, size_t);
#endif
+struct vx_info;
+struct nx_info;
+
union proc_op {
int (*proc_get_link)(struct inode *, struct path *);
int (*proc_read)(struct task_struct *task, char *page);
int (*proc_show)(struct seq_file *m,
struct pid_namespace *ns, struct pid *pid,
struct task_struct *task);
+ int (*proc_vs_read)(char *page);
+ int (*proc_vxi_read)(struct vx_info *vxi, char *page);
+ int (*proc_nxi_read)(struct nx_info *nxi, char *page);
};
struct proc_inode {
struct pid *pid;
+ int vx_flags;
int fd;
union proc_op op;
struct proc_dir_entry *pde;
--- a/include/linux/reiserfs_fs.h 2008-05-21 14:30:05.000000000 -0400
+++ a/include/linux/reiserfs_fs.h 2008-05-21 14:30:41.000000000 -0400
@@ -837,6 +837,10 @@ struct stat_data_v1 {
#define REISERFS_COMPR_FL FS_COMPR_FL
#define REISERFS_NOTAIL_FL FS_NOTAIL_FL
+/* unfortunately reiserfs sdattr is only 16 bit */
+#define REISERFS_BARRIER_FL (FS_BARRIER_FL >> 16)
+#define REISERFS_IUNLINK_FL (FS_IUNLINK_FL >> 16)
+
/* persistent flags that file inherits from the parent directory */
#define REISERFS_INHERIT_MASK ( REISERFS_IMMUTABLE_FL | \
REISERFS_SYNC_FL | \
@@ -846,6 +850,9 @@ struct stat_data_v1 {
REISERFS_COMPR_FL | \
REISERFS_NOTAIL_FL )
+#define REISERFS_FL_USER_VISIBLE 0x80FF
+#define REISERFS_FL_USER_MODIFIABLE 0x80FF
+
/* Stat Data on disk (reiserfs version of UFS disk inode minus the
address blocks) */
struct stat_data {
@@ -1911,6 +1918,7 @@ static inline void reiserfs_update_sd(st
void sd_attrs_to_i_attrs(__u16 sd_attrs, struct inode *inode);
void i_attrs_to_sd_attrs(struct inode *inode, __u16 * sd_attrs);
int reiserfs_setattr(struct dentry *dentry, struct iattr *attr);
+int reiserfs_sync_flags(struct inode *inode);
/* namei.c */
void set_de_name_and_namelen(struct reiserfs_dir_entry *de);
--- a/include/linux/reiserfs_fs_sb.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/reiserfs_fs_sb.h 2008-04-19 15:14:52.000000000 -0400
@@ -456,6 +456,7 @@ enum reiserfs_mount_options {
REISERFS_POSIXACL,
REISERFS_BARRIER_NONE,
REISERFS_BARRIER_FLUSH,
+ REISERFS_TAGGED,
/* Actions on error */
REISERFS_ERROR_PANIC,
--- a/include/linux/sched.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/sched.h 2008-04-21 11:09:01.000000000 -0400
@@ -73,7 +73,6 @@ struct sched_param {
#include <linux/fs_struct.h>
#include <linux/compiler.h>
#include <linux/completion.h>
-#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/proportions.h>
@@ -89,6 +88,7 @@ struct sched_param {
#include <linux/task_io_accounting.h>
#include <linux/kobject.h>
#include <linux/latencytop.h>
+#include <linux/pid.h>
#include <asm/processor.h>
@@ -355,25 +355,27 @@ extern void arch_unmap_area_topdown(stru
* The mm counters are not protected by its page_table_lock,
* so must be incremented atomically.
*/
-#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
-
+#define __set_mm_counter(mm, member, value) \
+ atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) \
+ ((unsigned long)atomic_long_read(&(mm)->_##member))
#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
/*
* The mm counters are protected by its page_table_lock,
* so can be incremented directly.
*/
-#define set_mm_counter(mm, member, value) (mm)->_##member = (value)
+#define __set_mm_counter(mm, member, value) (mm)->_##member = (value)
#define get_mm_counter(mm, member) ((mm)->_##member)
-#define add_mm_counter(mm, member, value) (mm)->_##member += (value)
-#define inc_mm_counter(mm, member) (mm)->_##member++
-#define dec_mm_counter(mm, member) (mm)->_##member--
#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#define set_mm_counter(mm, member, value) \
+ vx_ ## member ## pages_sub((mm), (get_mm_counter(mm, member) - value))
+#define add_mm_counter(mm, member, value) \
+ vx_ ## member ## pages_add((mm), (value))
+#define inc_mm_counter(mm, member) vx_ ## member ## pages_inc((mm))
+#define dec_mm_counter(mm, member) vx_ ## member ## pages_dec((mm))
+
#define get_mm_rss(mm) \
(get_mm_counter(mm, file_rss) + get_mm_counter(mm, anon_rss))
#define update_hiwater_rss(mm) do { \
@@ -1162,6 +1164,14 @@ struct task_struct {
#endif
seccomp_t seccomp;
+/* vserver context data */
+ struct vx_info *vx_info;
+ struct nx_info *nx_info;
+
+ xid_t xid;
+ nid_t nid;
+ tag_t tag;
+
/* Thread group tracking */
u32 parent_exec_id;
u32 self_exec_id;
@@ -1350,6 +1360,11 @@ struct pid_namespace;
* see also pid_nr() etc in include/linux/pid.h
*/
+#include <linux/vserver/base.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/pid.h>
+
static inline pid_t task_pid_nr(struct task_struct *tsk)
{
return tsk->pid;
@@ -1359,7 +1374,7 @@ pid_t task_pid_nr_ns(struct task_struct
static inline pid_t task_pid_vnr(struct task_struct *tsk)
{
- return pid_vnr(task_pid(tsk));
+ return vx_map_pid(pid_vnr(task_pid(tsk)));
}
@@ -1372,7 +1387,7 @@ pid_t task_tgid_nr_ns(struct task_struct
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
- return pid_vnr(task_tgid(tsk));
+ return vx_map_tgid(pid_vnr(task_tgid(tsk)));
}
--- a/include/linux/shmem_fs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/shmem_fs.h 2008-04-19 15:14:52.000000000 -0400
@@ -8,6 +8,9 @@
#define SHMEM_NR_DIRECT 16
+#define TMPFS_SUPER_MAGIC 0x01021994
+
+
struct shmem_inode_info {
spinlock_t lock;
unsigned long flags;
--- a/include/linux/stat.h 2008-04-17 10:33:07.000000000 -0400
+++ a/include/linux/stat.h 2008-04-19 15:14:52.000000000 -0400
@@ -66,6 +66,7 @@ struct kstat {
unsigned int nlink;
uid_t uid;
gid_t gid;
+ tag_t tag;
dev_t rdev;
loff_t size;
struct timespec atime;
--- a/include/linux/sunrpc/auth.h 2008-04-17 10:37:24.000000000 -0400
+++ a/include/linux/sunrpc/auth.h 2008-04-19 15:14:52.000000000 -0400
@@ -25,6 +25,7 @@
struct auth_cred {
uid_t uid;
gid_t gid;
+ tag_t tag;
struct group_info *group_info;
};
--- a/include/linux/sunrpc/clnt.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/sunrpc/clnt.h 2008-04-19 15:14:52.000000000 -0400
@@ -42,7 +42,8 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_discrtry : 1,/* disconnect before retry */
- cl_autobind : 1;/* use getport() */
+ cl_autobind : 1,/* use getport() */
+ cl_tag : 1;/* context tagging */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
--- a/include/linux/syscalls.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/syscalls.h 2008-04-19 15:14:52.000000000 -0400
@@ -294,6 +294,8 @@ asmlinkage long sys_symlink(const char _
asmlinkage long sys_unlink(const char __user *pathname);
asmlinkage long sys_rename(const char __user *oldname,
const char __user *newname);
+asmlinkage long sys_copyfile(const char __user *from, const char __user *to,
+ umode_t mode);
asmlinkage long sys_chmod(const char __user *filename, mode_t mode);
asmlinkage long sys_fchmod(unsigned int fd, mode_t mode);
--- a/include/linux/sysctl.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/sysctl.h 2008-04-19 15:14:52.000000000 -0400
@@ -70,6 +70,7 @@ enum
CTL_ABI=9, /* Binary emulation */
CTL_CPU=10, /* CPU stuff (speed scaling, etc) */
CTL_ARLAN=254, /* arlan wireless driver */
+ CTL_VSERVER=4242, /* Linux-VServer debug */
CTL_S390DBF=5677, /* s390 debug */
CTL_SUNRPC=7249, /* sunrpc debug */
CTL_PM=9899, /* frv power management */
@@ -104,6 +105,7 @@ enum
KERN_PANIC=15, /* int: panic timeout */
KERN_REALROOTDEV=16, /* real root device to mount after initrd */
+ KERN_VSHELPER=17, /* string: path to vshelper policy agent */
KERN_SPARC_REBOOT=21, /* reboot command on Sparc */
KERN_CTLALTDEL=22, /* int: allow ctl-alt-del to reboot */
--- a/include/linux/sysfs.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/sysfs.h 2008-04-19 15:14:52.000000000 -0400
@@ -17,6 +17,8 @@
#include <linux/list.h>
#include <asm/atomic.h>
+#define SYSFS_SUPER_MAGIC 0x62656572
+
struct kobject;
struct module;
--- a/include/linux/time.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/time.h 2008-04-19 15:14:52.000000000 -0400
@@ -183,6 +183,9 @@ static inline void timespec_add_ns(struc
}
a->tv_nsec = ns;
}
+
+#include <linux/vs_time.h>
+
#endif /* __KERNEL__ */
#define NFDBITS __NFDBITS
--- a/include/linux/types.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/linux/types.h 2008-04-19 15:14:52.000000000 -0400
@@ -36,6 +36,9 @@ typedef __kernel_uid32_t uid_t;
typedef __kernel_gid32_t gid_t;
typedef __kernel_uid16_t uid16_t;
typedef __kernel_gid16_t gid16_t;
+typedef unsigned int xid_t;
+typedef unsigned int nid_t;
+typedef unsigned int tag_t;
typedef unsigned long uintptr_t;
--- a/include/linux/vroot.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vroot.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,51 @@
+
+/*
+ * include/linux/vroot.h
+ *
+ * written by Herbert Pötzl, 9/11/2002
+ * ported to 2.6 by Herbert Pötzl, 30/12/2004
+ *
+ * Copyright (C) 2002-2007 by Herbert Pötzl.
+ * Redistribution of this file is permitted under the
+ * GNU General Public License.
+ */
+
+#ifndef _LINUX_VROOT_H
+#define _LINUX_VROOT_H
+
+
+#ifdef __KERNEL__
+
+/* Possible states of device */
+enum {
+ Vr_unbound,
+ Vr_bound,
+};
+
+struct vroot_device {
+ int vr_number;
+ int vr_refcnt;
+
+ struct semaphore vr_ctl_mutex;
+ struct block_device *vr_device;
+ int vr_state;
+};
+
+
+typedef struct block_device *(vroot_grb_func)(struct block_device *);
+
+extern int register_vroot_grb(vroot_grb_func *);
+extern int unregister_vroot_grb(vroot_grb_func *);
+
+#endif /* __KERNEL__ */
+
+#define MAX_VROOT_DEFAULT 8
+
+/*
+ * IOCTL commands --- we will commandeer 0x56 ('V')
+ */
+
+#define VROOT_SET_DEV 0x5600
+#define VROOT_CLR_DEV 0x5601
+
+#endif /* _LINUX_VROOT_H */
--- a/include/linux/vs_base.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_base.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,10 @@
+#ifndef _VS_BASE_H
+#define _VS_BASE_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_context.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_context.h 2008-04-29 18:42:09.000000000 -0400
@@ -0,0 +1,227 @@
+#ifndef _VS_CONTEXT_H
+#define _VS_CONTEXT_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/history.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_vx_info(i) __get_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__get_vx_info(struct vx_info *vxi,
+ const char *_file, int _line, void *_here)
+{
+ if (!vxi)
+ return NULL;
+
+ vxlprintk(VXD_CBIT(xid, 2), "get_vx_info(%p[#%d.%d])",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ _file, _line);
+ __vxh_get_vx_info(vxi, _here);
+
+ atomic_inc(&vxi->vx_usecnt);
+ return vxi;
+}
+
+
+extern void free_vx_info(struct vx_info *);
+
+#define put_vx_info(i) __put_vx_info(i, __FILE__, __LINE__, __HERE__)
+
+static inline void __put_vx_info(struct vx_info *vxi,
+ const char *_file, int _line, void *_here)
+{
+ if (!vxi)
+ return;
+
+ vxlprintk(VXD_CBIT(xid, 2), "put_vx_info(%p[#%d.%d])",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ _file, _line);
+ __vxh_put_vx_info(vxi, _here);
+
+ if (atomic_dec_and_test(&vxi->vx_usecnt))
+ free_vx_info(vxi);
+}
+
+
+#define init_vx_info(p, i) \
+ __init_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __init_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+ const char *_file, int _line, void *_here)
+{
+ if (vxi) {
+ vxlprintk(VXD_CBIT(xid, 3),
+ "init_vx_info(%p[#%d.%d])",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ _file, _line);
+ __vxh_init_vx_info(vxi, vxp, _here);
+
+ atomic_inc(&vxi->vx_usecnt);
+ }
+ *vxp = vxi;
+}
+
+
+#define set_vx_info(p, i) \
+ __set_vx_info(p, i, __FILE__, __LINE__, __HERE__)
+
+static inline void __set_vx_info(struct vx_info **vxp, struct vx_info *vxi,
+ const char *_file, int _line, void *_here)
+{
+ struct vx_info *vxo;
+
+ if (!vxi)
+ return;
+
+ vxlprintk(VXD_CBIT(xid, 3), "set_vx_info(%p[#%d.%d])",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ _file, _line);
+ __vxh_set_vx_info(vxi, vxp, _here);
+
+ atomic_inc(&vxi->vx_usecnt);
+ vxo = xchg(vxp, vxi);
+ BUG_ON(vxo);
+}
+
+
+#define clr_vx_info(p) __clr_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline void __clr_vx_info(struct vx_info **vxp,
+ const char *_file, int _line, void *_here)
+{
+ struct vx_info *vxo;
+
+ vxo = xchg(vxp, NULL);
+ if (!vxo)
+ return;
+
+ vxlprintk(VXD_CBIT(xid, 3), "clr_vx_info(%p[#%d.%d])",
+ vxo, vxo ? vxo->vx_id : 0,
+ vxo ? atomic_read(&vxo->vx_usecnt) : 0,
+ _file, _line);
+ __vxh_clr_vx_info(vxo, vxp, _here);
+
+ if (atomic_dec_and_test(&vxo->vx_usecnt))
+ free_vx_info(vxo);
+}
+
+
+#define claim_vx_info(v, p) \
+ __claim_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __claim_vx_info(struct vx_info *vxi,
+ struct task_struct *task,
+ const char *_file, int _line, void *_here)
+{
+ vxlprintk(VXD_CBIT(xid, 3), "claim_vx_info(%p[#%d.%d.%d]) %p",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ vxi ? atomic_read(&vxi->vx_tasks) : 0,
+ task, _file, _line);
+ __vxh_claim_vx_info(vxi, task, _here);
+
+ atomic_inc(&vxi->vx_tasks);
+}
+
+
+extern void unhash_vx_info(struct vx_info *);
+
+#define release_vx_info(v, p) \
+ __release_vx_info(v, p, __FILE__, __LINE__, __HERE__)
+
+static inline void __release_vx_info(struct vx_info *vxi,
+ struct task_struct *task,
+ const char *_file, int _line, void *_here)
+{
+ vxlprintk(VXD_CBIT(xid, 3), "release_vx_info(%p[#%d.%d.%d]) %p",
+ vxi, vxi ? vxi->vx_id : 0,
+ vxi ? atomic_read(&vxi->vx_usecnt) : 0,
+ vxi ? atomic_read(&vxi->vx_tasks) : 0,
+ task, _file, _line);
+ __vxh_release_vx_info(vxi, task, _here);
+
+ might_sleep();
+
+ if (atomic_dec_and_test(&vxi->vx_tasks))
+ unhash_vx_info(vxi);
+}
+
+
+#define task_get_vx_info(p) \
+ __task_get_vx_info(p, __FILE__, __LINE__, __HERE__)
+
+static inline struct vx_info *__task_get_vx_info(struct task_struct *p,
+ const char *_file, int _line, void *_here)
+{
+ struct vx_info *vxi;
+
+ task_lock(p);
+ vxlprintk(VXD_CBIT(xid, 5), "task_get_vx_info(%p)",
+ p, _file, _line);
+ vxi = __get_vx_info(p->vx_info, _file, _line, _here);
+ task_unlock(p);
+ return vxi;
+}
+
+
+static inline void __wakeup_vx_info(struct vx_info *vxi)
+{
+ if (waitqueue_active(&vxi->vx_wait))
+ wake_up_interruptible(&vxi->vx_wait);
+}
+
+
+#define enter_vx_info(v, s) __enter_vx_info(v, s, __FILE__, __LINE__)
+
+static inline void __enter_vx_info(struct vx_info *vxi,
+ struct vx_info_save *vxis, const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(xid, 5), "enter_vx_info(%p[#%d],%p) %p[#%d,%p]",
+ vxi, vxi ? vxi->vx_id : 0, vxis, current,
+ current->xid, current->vx_info, _file, _line);
+ vxis->vxi = xchg(&current->vx_info, vxi);
+ vxis->xid = current->xid;
+ current->xid = vxi ? vxi->vx_id : 0;
+}
+
+#define leave_vx_info(s) __leave_vx_info(s, __FILE__, __LINE__)
+
+static inline void __leave_vx_info(struct vx_info_save *vxis,
+ const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(xid, 5), "leave_vx_info(%p[#%d,%p]) %p[#%d,%p]",
+ vxis, vxis->xid, vxis->vxi, current,
+ current->xid, current->vx_info, _file, _line);
+ (void)xchg(&current->vx_info, vxis->vxi);
+ current->xid = vxis->xid;
+}
+
+
+static inline void __enter_vx_admin(struct vx_info_save *vxis)
+{
+ vxis->vxi = xchg(&current->vx_info, NULL);
+ vxis->xid = xchg(&current->xid, (xid_t)0);
+}
+
+static inline void __leave_vx_admin(struct vx_info_save *vxis)
+{
+ (void)xchg(&current->xid, vxis->xid);
+ (void)xchg(&current->vx_info, vxis->vxi);
+}
+
+extern void exit_vx_info(struct task_struct *, int);
+extern void exit_vx_info_early(struct task_struct *, int);
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_cowbl.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_cowbl.h 2008-04-21 13:49:51.000000000 -0400
@@ -0,0 +1,44 @@
+#ifndef _VS_COWBL_H
+#define _VS_COWBL_H
+
+#include <linux/fs.h>
+#include <linux/dcache.h>
+#include <linux/namei.h>
+
+extern struct dentry *cow_break_link(const char *pathname);
+
+static inline int cow_check_and_break(struct nameidata *nd)
+{
+ struct inode *inode = nd->path.dentry->d_inode;
+ int error = 0;
+ if (IS_RDONLY(inode) || MNT_IS_RDONLY(nd->path.mnt))
+ return -EROFS;
+ if (IS_COW(inode)) {
+ if (IS_COW_LINK(inode)) {
+ struct dentry *new_dentry, *old_dentry = nd->path.dentry;
+ char *path, *buf;
+
+ buf = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!buf) {
+ return -ENOMEM;
+ }
+ path = d_path(&nd->path, buf, PATH_MAX);
+ new_dentry = cow_break_link(path);
+ kfree(buf);
+ if (!IS_ERR(new_dentry)) {
+ nd->path.dentry = new_dentry;
+ dput(old_dentry);
+ } else
+ error = PTR_ERR(new_dentry);
+ } else {
+ inode->i_flags &= ~(S_IUNLINK | S_IMMUTABLE);
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
+ }
+ }
+ return error;
+}
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_cvirt.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_cvirt.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,50 @@
+#ifndef _VS_CVIRT_H
+#define _VS_CVIRT_H
+
+#include "vserver/cvirt.h"
+#include "vserver/context.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+
+static inline void vx_activate_task(struct task_struct *p)
+{
+ struct vx_info *vxi;
+
+ if ((vxi = p->vx_info)) {
+ vx_update_load(vxi);
+ atomic_inc(&vxi->cvirt.nr_running);
+ }
+}
+
+static inline void vx_deactivate_task(struct task_struct *p)
+{
+ struct vx_info *vxi;
+
+ if ((vxi = p->vx_info)) {
+ vx_update_load(vxi);
+ atomic_dec(&vxi->cvirt.nr_running);
+ }
+}
+
+static inline void vx_uninterruptible_inc(struct task_struct *p)
+{
+ struct vx_info *vxi;
+
+ if ((vxi = p->vx_info))
+ atomic_inc(&vxi->cvirt.nr_uninterruptible);
+}
+
+static inline void vx_uninterruptible_dec(struct task_struct *p)
+{
+ struct vx_info *vxi;
+
+ if ((vxi = p->vx_info))
+ atomic_dec(&vxi->cvirt.nr_uninterruptible);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_device.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_device.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,45 @@
+#ifndef _VS_DEVICE_H
+#define _VS_DEVICE_H
+
+#include "vserver/base.h"
+#include "vserver/device.h"
+#include "vserver/debug.h"
+
+
+#ifdef CONFIG_VSERVER_DEVICE
+
+int vs_map_device(struct vx_info *, dev_t, dev_t *, umode_t);
+
+#define vs_device_perm(v, d, m, p) \
+ ((vs_map_device(current_vx_info(), d, NULL, m) & (p)) == (p))
+
+#else
+
+static inline
+int vs_map_device(struct vx_info *vxi,
+ dev_t device, dev_t *target, umode_t mode)
+{
+ if (target)
+ *target = device;
+ return ~0;
+}
+
+#define vs_device_perm(v, d, m, p) ((p) == (p))
+
+#endif
+
+
+#define vs_map_chrdev(d, t, p) \
+ ((vs_map_device(current_vx_info(), d, t, S_IFCHR) & (p)) == (p))
+#define vs_map_blkdev(d, t, p) \
+ ((vs_map_device(current_vx_info(), d, t, S_IFBLK) & (p)) == (p))
+
+#define vs_chrdev_perm(d, p) \
+ vs_device_perm(current_vx_info(), d, S_IFCHR, p)
+#define vs_blkdev_perm(d, p) \
+ vs_device_perm(current_vx_info(), d, S_IFBLK, p)
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_dlimit.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_dlimit.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,211 @@
+#ifndef _VS_DLIMIT_H
+#define _VS_DLIMIT_H
+
+#include <linux/fs.h>
+
+#include "vserver/dlimit.h"
+#include "vserver/base.h"
+#include "vserver/debug.h"
+
+
+#define get_dl_info(i) __get_dl_info(i, __FILE__, __LINE__)
+
+static inline struct dl_info *__get_dl_info(struct dl_info *dli,
+ const char *_file, int _line)
+{
+ if (!dli)
+ return NULL;
+ vxlprintk(VXD_CBIT(dlim, 4), "get_dl_info(%p[#%d.%d])",
+ dli, dli ? dli->dl_tag : 0,
+ dli ? atomic_read(&dli->dl_usecnt) : 0,
+ _file, _line);
+ atomic_inc(&dli->dl_usecnt);
+ return dli;
+}
+
+
+#define free_dl_info(i) \
+ call_rcu(&(i)->dl_rcu, rcu_free_dl_info)
+
+#define put_dl_info(i) __put_dl_info(i, __FILE__, __LINE__)
+
+static inline void __put_dl_info(struct dl_info *dli,
+ const char *_file, int _line)
+{
+ if (!dli)
+ return;
+ vxlprintk(VXD_CBIT(dlim, 4), "put_dl_info(%p[#%d.%d])",
+ dli, dli ? dli->dl_tag : 0,
+ dli ? atomic_read(&dli->dl_usecnt) : 0,
+ _file, _line);
+ if (atomic_dec_and_test(&dli->dl_usecnt))
+ free_dl_info(dli);
+}
+
+
+#define __dlimit_char(d) ((d) ? '*' : ' ')
+
+static inline int __dl_alloc_space(struct super_block *sb,
+ tag_t tag, dlsize_t nr, const char *file, int line)
+{
+ struct dl_info *dli = NULL;
+ int ret = 0;
+
+ if (nr == 0)
+ goto out;
+ dli = locate_dl_info(sb, tag);
+ if (!dli)
+ goto out;
+
+ spin_lock(&dli->dl_lock);
+ ret = (dli->dl_space_used + nr > dli->dl_space_total);
+ if (!ret)
+ dli->dl_space_used += nr;
+ spin_unlock(&dli->dl_lock);
+ put_dl_info(dli);
+out:
+ vxlprintk(VXD_CBIT(dlim, 1),
+ "ALLOC (%p,#%d)%c %lld bytes (%d)",
+ sb, tag, __dlimit_char(dli), (long long)nr,
+ ret, file, line);
+ return ret;
+}
+
+static inline void __dl_free_space(struct super_block *sb,
+ tag_t tag, dlsize_t nr, const char *_file, int _line)
+{
+ struct dl_info *dli = NULL;
+
+ if (nr == 0)
+ goto out;
+ dli = locate_dl_info(sb, tag);
+ if (!dli)
+ goto out;
+
+ spin_lock(&dli->dl_lock);
+ if (dli->dl_space_used > nr)
+ dli->dl_space_used -= nr;
+ else
+ dli->dl_space_used = 0;
+ spin_unlock(&dli->dl_lock);
+ put_dl_info(dli);
+out:
+ vxlprintk(VXD_CBIT(dlim, 1),
+ "FREE (%p,#%d)%c %lld bytes",
+ sb, tag, __dlimit_char(dli), (long long)nr,
+ _file, _line);
+}
+
+static inline int __dl_alloc_inode(struct super_block *sb,
+ tag_t tag, const char *_file, int _line)
+{
+ struct dl_info *dli;
+ int ret = 0;
+
+ dli = locate_dl_info(sb, tag);
+ if (!dli)
+ goto out;
+
+ spin_lock(&dli->dl_lock);
+ ret = (dli->dl_inodes_used >= dli->dl_inodes_total);
+ if (!ret)
+ dli->dl_inodes_used++;
+ spin_unlock(&dli->dl_lock);
+ put_dl_info(dli);
+out:
+ vxlprintk(VXD_CBIT(dlim, 0),
+ "ALLOC (%p,#%d)%c inode (%d)",
+ sb, tag, __dlimit_char(dli), ret, _file, _line);
+ return ret;
+}
+
+static inline void __dl_free_inode(struct super_block *sb,
+ tag_t tag, const char *_file, int _line)
+{
+ struct dl_info *dli;
+
+ dli = locate_dl_info(sb, tag);
+ if (!dli)
+ goto out;
+
+ spin_lock(&dli->dl_lock);
+ if (dli->dl_inodes_used > 1)
+ dli->dl_inodes_used--;
+ else
+ dli->dl_inodes_used = 0;
+ spin_unlock(&dli->dl_lock);
+ put_dl_info(dli);
+out:
+ vxlprintk(VXD_CBIT(dlim, 0),
+ "FREE (%p,#%d)%c inode",
+ sb, tag, __dlimit_char(dli), _file, _line);
+}
+
+static inline void __dl_adjust_block(struct super_block *sb, tag_t tag,
+ unsigned long long *free_blocks, unsigned long long *root_blocks,
+ const char *_file, int _line)
+{
+ struct dl_info *dli;
+ uint64_t broot, bfree;
+
+ dli = locate_dl_info(sb, tag);
+ if (!dli)
+ return;
+
+ spin_lock(&dli->dl_lock);
+ broot = (dli->dl_space_total -
+ (dli->dl_space_total >> 10) * dli->dl_nrlmult)
+ >> sb->s_blocksize_bits;
+ bfree = (dli->dl_space_total - dli->dl_space_used)
+ >> sb->s_blocksize_bits;
+ spin_unlock(&dli->dl_lock);
+
+ vxlprintk(VXD_CBIT(dlim, 2),
+ "ADJUST: %lld,%lld on %lld,%lld [mult=%d]",
+ (long long)bfree, (long long)broot,
+ *free_blocks, *root_blocks, dli->dl_nrlmult,
+ _file, _line);
+ if (free_blocks) {
+ if (*free_blocks > bfree)
+ *free_blocks = bfree;
+ }
+ if (root_blocks) {
+ if (*root_blocks > broot)
+ *root_blocks = broot;
+ }
+ put_dl_info(dli);
+}
+
+#define DLIMIT_ALLOC_SPACE(in, bytes) \
+ __dl_alloc_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+ __FILE__, __LINE__ )
+
+#define DLIMIT_FREE_SPACE(in, bytes) \
+ __dl_free_space((in)->i_sb, (in)->i_tag, (dlsize_t)(bytes), \
+ __FILE__, __LINE__ )
+
+#define DLIMIT_ALLOC_BLOCK(in, nr) \
+ __dl_alloc_space((in)->i_sb, (in)->i_tag, \
+ ((dlsize_t)(nr)) << (in)->i_sb->s_blocksize_bits, \
+ __FILE__, __LINE__ )
+
+#define DLIMIT_FREE_BLOCK(in, nr) \
+ __dl_free_space((in)->i_sb, (in)->i_tag, \
+ ((dlsize_t)(nr)) << (in)->i_sb->s_blocksize_bits, \
+ __FILE__, __LINE__ )
+
+
+#define DLIMIT_ALLOC_INODE(in) \
+ __dl_alloc_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
+
+#define DLIMIT_FREE_INODE(in) \
+ __dl_free_inode((in)->i_sb, (in)->i_tag, __FILE__, __LINE__ )
+
+
+#define DLIMIT_ADJUST_BLOCK(sb, tag, fb, rb) \
+ __dl_adjust_block(sb, tag, fb, rb, __FILE__, __LINE__ )
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vserver/base.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/base.h 2008-04-23 20:52:31.000000000 -0400
@@ -0,0 +1,157 @@
+#ifndef _VX_BASE_H
+#define _VX_BASE_H
+
+
+/* context state changes */
+
+enum {
+ VSC_STARTUP = 1,
+ VSC_SHUTDOWN,
+
+ VSC_NETUP,
+ VSC_NETDOWN,
+};
+
+
+
+#define vx_task_xid(t) ((t)->xid)
+
+#define vx_current_xid() vx_task_xid(current)
+
+#define current_vx_info() (current->vx_info)
+
+
+#define nx_task_nid(t) ((t)->nid)
+
+#define nx_current_nid() nx_task_nid(current)
+
+#define current_nx_info() (current->nx_info)
+
+
+/* generic flag merging */
+
+#define vs_check_flags(v, m, f) (((v) & (m)) ^ (f))
+
+#define vs_mask_flags(v, f, m) (((v) & ~(m)) | ((f) & (m)))
+
+#define vs_mask_mask(v, f, m) (((v) & ~(m)) | ((v) & (f) & (m)))
+
+#define vs_check_bit(v, n) ((v) & (1LL << (n)))
+
+
+/* context flags */
+
+#define __vx_flags(v) ((v) ? (v)->vx_flags : 0)
+
+#define vx_current_flags() __vx_flags(current->vx_info)
+
+#define vx_info_flags(v, m, f) \
+ vs_check_flags(__vx_flags(v), m, f)
+
+#define task_vx_flags(t, m, f) \
+ ((t) && vx_info_flags((t)->vx_info, m, f))
+
+#define vx_flags(m, f) vx_info_flags(current->vx_info, m, f)
+
+
+/* context caps */
+
+#define __vx_ccaps(v) ((v) ? (v)->vx_ccaps : 0)
+
+#define vx_current_ccaps() __vx_ccaps(current->vx_info)
+
+#define vx_info_ccaps(v, c) (__vx_ccaps(v) & (c))
+
+#define vx_ccaps(c) vx_info_ccaps(current->vx_info, (c))
+
+
+
+/* network flags */
+
+#define __nx_flags(n) ((n) ? (n)->nx_flags : 0)
+
+#define nx_current_flags() __nx_flags(current->nx_info)
+
+#define nx_info_flags(n, m, f) \
+ vs_check_flags(__nx_flags(n), m, f)
+
+#define task_nx_flags(t, m, f) \
+ ((t) && nx_info_flags((t)->nx_info, m, f))
+
+#define nx_flags(m, f) nx_info_flags(current->nx_info, m, f)
+
+
+/* network caps */
+
+#define __nx_ncaps(n) ((n) ? (n)->nx_ncaps : 0)
+
+#define nx_current_ncaps() __nx_ncaps(current->nx_info)
+
+#define nx_info_ncaps(n, c) (__nx_ncaps(n) & (c))
+
+#define nx_ncaps(c) nx_info_ncaps(current->nx_info, c)
+
+
+/* context mask capabilities */
+
+#define __vx_mcaps(v) ((v) ? (v)->vx_ccaps >> 32UL : ~0 )
+
+#define vx_info_mcaps(v, c) (__vx_mcaps(v) & (c))
+
+#define vx_mcaps(c) vx_info_mcaps(current->vx_info, c)
+
+
+/* context bcap mask */
+
+#define __vx_bcaps(v) ((v)->vx_bcaps)
+
+#define vx_current_bcaps() __vx_bcaps(current->vx_info)
+
+
+/* mask given bcaps */
+
+#define vx_info_mbcaps(v, c) ((v) ? cap_intersect(__vx_bcaps(v), c) : c)
+
+#define vx_mbcaps(c) vx_info_mbcaps(current->vx_info, c)
+
+
+/* masked cap_bset */
+
+#define vx_info_cap_bset(v) vx_info_mbcaps(v, current->cap_bset)
+
+#define vx_current_cap_bset() vx_info_cap_bset(current->vx_info)
+
+#if 0
+#define vx_info_mbcap(v, b) \
+ (!vx_info_flags(v, VXF_STATE_SETUP, 0) ? \
+ vx_info_bcaps(v, b) : (b))
+
+#define task_vx_mbcap(t, b) \
+ vx_info_mbcap((t)->vx_info, (t)->b)
+
+#define vx_mbcap(b) task_vx_mbcap(current, b)
+#endif
+
+#define vx_cap_raised(v, c, f) cap_raised(vx_info_mbcaps(v, c), f)
+
+#define vx_capable(b, c) (capable(b) || \
+ (cap_raised(current->cap_effective, b) && vx_ccaps(c)))
+
+#define nx_capable(b, c) (capable(b) || \
+ (cap_raised(current->cap_effective, b) && nx_ncaps(c)))
+
+#define vx_current_initpid(n) \
+ (current->vx_info && \
+ (current->vx_info->vx_initpid == (n)))
+
+
+#define __vx_state(v) ((v) ? ((v)->vx_state) : 0)
+
+#define vx_info_state(v, m) (__vx_state(v) & (m))
+
+
+#define __nx_state(n) ((n) ? ((n)->nx_state) : 0)
+
+#define nx_info_state(n, m) (__nx_state(n) & (m))
+
+#endif
--- a/include/linux/vserver/cacct_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cacct_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,23 @@
+#ifndef _VX_CACCT_CMD_H
+#define _VX_CACCT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_sock_stat VC_CMD(VSTAT, 5, 0)
+
+struct vcmd_sock_stat_v0 {
+ uint32_t field;
+ uint32_t count[3];
+ uint64_t total[3];
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_sock_stat(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CACCT_CMD_H */
--- a/include/linux/vserver/cacct_def.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cacct_def.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,43 @@
+#ifndef _VX_CACCT_DEF_H
+#define _VX_CACCT_DEF_H
+
+#include <asm/atomic.h>
+#include <linux/vserver/cacct.h>
+
+
+struct _vx_sock_acc {
+ atomic_long_t count;
+ atomic_long_t total;
+};
+
+/* context sub struct */
+
+struct _vx_cacct {
+ struct _vx_sock_acc sock[VXA_SOCK_SIZE][3];
+ atomic_t slab[8];
+ atomic_t page[6][8];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cacct(struct _vx_cacct *cacct)
+{
+ int i, j;
+
+ printk("\t_vx_cacct:");
+ for (i = 0; i < 6; i++) {
+ struct _vx_sock_acc *ptr = cacct->sock[i];
+
+ printk("\t [%d] =", i);
+ for (j = 0; j < 3; j++) {
+ printk(" [%d] = %8lu, %8lu", j,
+ atomic_long_read(&ptr[j].count),
+ atomic_long_read(&ptr[j].total));
+ }
+ printk("\n");
+ }
+}
+
+#endif
+
+#endif /* _VX_CACCT_DEF_H */
--- a/include/linux/vserver/cacct.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cacct.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,15 @@
+#ifndef _VX_CACCT_H
+#define _VX_CACCT_H
+
+
+enum sock_acc_field {
+ VXA_SOCK_UNSPEC = 0,
+ VXA_SOCK_UNIX,
+ VXA_SOCK_INET,
+ VXA_SOCK_INET6,
+ VXA_SOCK_PACKET,
+ VXA_SOCK_OTHER,
+ VXA_SOCK_SIZE /* array size */
+};
+
+#endif /* _VX_CACCT_H */
--- a/include/linux/vserver/cacct_int.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cacct_int.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,21 @@
+#ifndef _VX_CACCT_INT_H
+#define _VX_CACCT_INT_H
+
+
+#ifdef __KERNEL__
+
+static inline
+unsigned long vx_sock_count(struct _vx_cacct *cacct, int type, int pos)
+{
+ return atomic_long_read(&cacct->sock[type][pos].count);
+}
+
+
+static inline
+unsigned long vx_sock_total(struct _vx_cacct *cacct, int type, int pos)
+{
+ return atomic_long_read(&cacct->sock[type][pos].total);
+}
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CACCT_INT_H */
--- a/include/linux/vserver/check.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/check.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,89 @@
+#ifndef _VS_CHECK_H
+#define _VS_CHECK_H
+
+
+#define MAX_S_CONTEXT 65535 /* Arbitrary limit */
+
+#ifdef CONFIG_VSERVER_DYNAMIC_IDS
+#define MIN_D_CONTEXT 49152 /* dynamic contexts start here */
+#else
+#define MIN_D_CONTEXT 65536
+#endif
+
+/* check conditions */
+
+#define VS_ADMIN 0x0001
+#define VS_WATCH 0x0002
+#define VS_HIDE 0x0004
+#define VS_HOSTID 0x0008
+
+#define VS_IDENT 0x0010
+#define VS_EQUIV 0x0020
+#define VS_PARENT 0x0040
+#define VS_CHILD 0x0080
+
+#define VS_ARG_MASK 0x00F0
+
+#define VS_DYNAMIC 0x0100
+#define VS_STATIC 0x0200
+
+#define VS_ATR_MASK 0x0F00
+
+#ifdef CONFIG_VSERVER_PRIVACY
+#define VS_ADMIN_P (0)
+#define VS_WATCH_P (0)
+#else
+#define VS_ADMIN_P VS_ADMIN
+#define VS_WATCH_P VS_WATCH
+#endif
+
+#define VS_HARDIRQ 0x1000
+#define VS_SOFTIRQ 0x2000
+#define VS_IRQ 0x4000
+
+#define VS_IRQ_MASK 0xF000
+
+#include <linux/hardirq.h>
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __vs_check(int cid, int id, unsigned int mode)
+{
+ if (mode & VS_ARG_MASK) {
+ if ((mode & VS_IDENT) && (id == cid))
+ return 1;
+ }
+ if (mode & VS_ATR_MASK) {
+ if ((mode & VS_DYNAMIC) &&
+ (id >= MIN_D_CONTEXT) &&
+ (id <= MAX_S_CONTEXT))
+ return 1;
+ if ((mode & VS_STATIC) &&
+ (id > 1) && (id < MIN_D_CONTEXT))
+ return 1;
+ }
+ if (mode & VS_IRQ_MASK) {
+ if ((mode & VS_IRQ) && unlikely(in_interrupt()))
+ return 1;
+ if ((mode & VS_HARDIRQ) && unlikely(in_irq()))
+ return 1;
+ if ((mode & VS_SOFTIRQ) && unlikely(in_softirq()))
+ return 1;
+ }
+ return (((mode & VS_ADMIN) && (cid == 0)) ||
+ ((mode & VS_WATCH) && (cid == 1)) ||
+ ((mode & VS_HOSTID) && (id == 0)));
+}
+
+#define vx_check(c, m) __vs_check(vx_current_xid(), c, (m) | VS_IRQ)
+
+#define vx_weak_check(c, m) ((m) ? vx_check(c, m) : 1)
+
+
+#define nx_check(c, m) __vs_check(nx_current_nid(), c, m)
+
+#define nx_weak_check(c, m) ((m) ? nx_check(c, m) : 1)
+
+#endif
--- a/include/linux/vserver/context_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/context_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,128 @@
+#ifndef _VX_CONTEXT_CMD_H
+#define _VX_CONTEXT_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_xid VC_CMD(VINFO, 1, 0)
+
+#ifdef __KERNEL__
+extern int vc_task_xid(uint32_t);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_vx_info VC_CMD(VINFO, 5, 0)
+
+struct vcmd_vx_info_v0 {
+ uint32_t xid;
+ uint32_t initpid;
+ /* more to come */
+};
+
+#ifdef __KERNEL__
+extern int vc_vx_info(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_ctx_stat VC_CMD(VSTAT, 0, 0)
+
+struct vcmd_ctx_stat_v0 {
+ uint32_t usecnt;
+ uint32_t tasks;
+ /* more to come */
+};
+
+#ifdef __KERNEL__
+extern int vc_ctx_stat(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+/* context commands */
+
+#define VCMD_ctx_create_v0 VC_CMD(VPROC, 1, 0)
+#define VCMD_ctx_create VC_CMD(VPROC, 1, 1)
+
+struct vcmd_ctx_create {
+ uint64_t flagword;
+};
+
+#define VCMD_ctx_migrate_v0 VC_CMD(PROCMIG, 1, 0)
+#define VCMD_ctx_migrate VC_CMD(PROCMIG, 1, 1)
+
+struct vcmd_ctx_migrate {
+ uint64_t flagword;
+};
+
+#ifdef __KERNEL__
+extern int vc_ctx_create(uint32_t, void __user *);
+extern int vc_ctx_migrate(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* flag commands */
+
+#define VCMD_get_cflags VC_CMD(FLAGS, 1, 0)
+#define VCMD_set_cflags VC_CMD(FLAGS, 2, 0)
+
+struct vcmd_ctx_flags_v0 {
+ uint64_t flagword;
+ uint64_t mask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_cflags(struct vx_info *, void __user *);
+extern int vc_set_cflags(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* context caps commands */
+
+#define VCMD_get_ccaps VC_CMD(FLAGS, 3, 1)
+#define VCMD_set_ccaps VC_CMD(FLAGS, 4, 1)
+
+struct vcmd_ctx_caps_v1 {
+ uint64_t ccaps;
+ uint64_t cmask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_ccaps(struct vx_info *, void __user *);
+extern int vc_set_ccaps(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* bcaps commands */
+
+#define VCMD_get_bcaps VC_CMD(FLAGS, 9, 0)
+#define VCMD_set_bcaps VC_CMD(FLAGS, 10, 0)
+
+struct vcmd_bcaps {
+ uint64_t bcaps;
+ uint64_t bmask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_bcaps(struct vx_info *, void __user *);
+extern int vc_set_bcaps(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* OOM badness */
+
+#define VCMD_get_badness VC_CMD(MEMCTRL, 5, 0)
+#define VCMD_set_badness VC_CMD(MEMCTRL, 6, 0)
+
+struct vcmd_badness_v0 {
+ int64_t bias;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_badness(struct vx_info *, void __user *);
+extern int vc_set_badness(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CONTEXT_CMD_H */
--- a/include/linux/vserver/context.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/context.h 2008-04-22 15:14:28.000000000 -0400
@@ -0,0 +1,176 @@
+#ifndef _VX_CONTEXT_H
+#define _VX_CONTEXT_H
+
+#include <linux/types.h>
+#include <linux/capability.h>
+
+
+/* context flags */
+
+#define VXF_INFO_SCHED 0x00000002
+#define VXF_INFO_NPROC 0x00000004
+#define VXF_INFO_PRIVATE 0x00000008
+
+#define VXF_INFO_INIT 0x00000010
+#define VXF_INFO_HIDE 0x00000020
+#define VXF_INFO_ULIMIT 0x00000040
+#define VXF_INFO_NSPACE 0x00000080
+
+#define VXF_SCHED_HARD 0x00000100
+#define VXF_SCHED_PRIO 0x00000200
+#define VXF_SCHED_PAUSE 0x00000400
+
+#define VXF_VIRT_MEM 0x00010000
+#define VXF_VIRT_UPTIME 0x00020000
+#define VXF_VIRT_CPU 0x00040000
+#define VXF_VIRT_LOAD 0x00080000
+#define VXF_VIRT_TIME 0x00100000
+
+#define VXF_HIDE_MOUNT 0x01000000
+/* was VXF_HIDE_NETIF 0x02000000 */
+#define VXF_HIDE_VINFO 0x04000000
+
+#define VXF_STATE_SETUP (1ULL << 32)
+#define VXF_STATE_INIT (1ULL << 33)
+#define VXF_STATE_ADMIN (1ULL << 34)
+
+#define VXF_SC_HELPER (1ULL << 36)
+#define VXF_REBOOT_KILL (1ULL << 37)
+#define VXF_PERSISTENT (1ULL << 38)
+
+#define VXF_FORK_RSS (1ULL << 48)
+#define VXF_PROLIFIC (1ULL << 49)
+
+#define VXF_IGNEG_NICE (1ULL << 52)
+
+#define VXF_ONE_TIME (0x0007ULL << 32)
+
+#define VXF_INIT_SET (VXF_STATE_SETUP | VXF_STATE_INIT | VXF_STATE_ADMIN)
+
+
+/* context migration */
+
+#define VXM_SET_INIT 0x00000001
+#define VXM_SET_REAPER 0x00000002
+
+/* context caps */
+
+#define VXC_CAP_MASK 0x00000000
+
+#define VXC_SET_UTSNAME 0x00000001
+#define VXC_SET_RLIMIT 0x00000002
+
+/* was VXC_RAW_ICMP 0x00000100 */
+#define VXC_SYSLOG 0x00001000
+
+#define VXC_SECURE_MOUNT 0x00010000
+#define VXC_SECURE_REMOUNT 0x00020000
+#define VXC_BINARY_MOUNT 0x00040000
+
+#define VXC_QUOTA_CTL 0x00100000
+#define VXC_ADMIN_MAPPER 0x00200000
+#define VXC_ADMIN_CLOOP 0x00400000
+
+#define VXC_KTHREAD 0x01000000
+
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+#include "limit_def.h"
+#include "sched_def.h"
+#include "cvirt_def.h"
+#include "cacct_def.h"
+#include "device_def.h"
+
+struct _vx_info_pc {
+ struct _vx_sched_pc sched_pc;
+ struct _vx_cvirt_pc cvirt_pc;
+};
+
+struct vx_info {
+ struct hlist_node vx_hlist; /* linked list of contexts */
+ xid_t vx_id; /* context id */
+ atomic_t vx_usecnt; /* usage count */
+ atomic_t vx_tasks; /* tasks count */
+ struct vx_info *vx_parent; /* parent context */
+ int vx_state; /* context state */
+
+ unsigned long vx_nsmask; /* assignment mask */
+ struct nsproxy *vx_nsproxy; /* private namespace */
+ struct fs_struct *vx_fs; /* private namespace fs */
+
+ uint64_t vx_flags; /* context flags */
+ uint64_t vx_ccaps; /* context caps (vserver) */
+ kernel_cap_t vx_bcaps; /* bounding caps (system) */
+ // kernel_cap_t vx_cap_bset; /* the guest's bset */
+
+ struct task_struct *vx_reaper; /* guest reaper process */
+ pid_t vx_initpid; /* PID of guest init */
+ int64_t vx_badness_bias; /* OOM points bias */
+
+ struct _vx_limit limit; /* vserver limits */
+ struct _vx_sched sched; /* vserver scheduler */
+ struct _vx_cvirt cvirt; /* virtual/bias stuff */
+ struct _vx_cacct cacct; /* context accounting */
+
+ struct _vx_device dmap; /* default device map targets */
+
+#ifndef CONFIG_SMP
+ struct _vx_info_pc info_pc; /* per cpu data */
+#else
+ struct _vx_info_pc *ptr_pc; /* per cpu array */
+#endif
+
+ wait_queue_head_t vx_wait; /* context exit waitqueue */
+ int reboot_cmd; /* last sys_reboot() cmd */
+ int exit_code; /* last process exit code */
+
+ char vx_name[65]; /* vserver name */
+};
+
+#ifndef CONFIG_SMP
+#define vx_ptr_pc(vxi) (&(vxi)->info_pc)
+#define vx_per_cpu(vxi, v, id) vx_ptr_pc(vxi)->v
+#else
+#define vx_ptr_pc(vxi) ((vxi)->ptr_pc)
+#define vx_per_cpu(vxi, v, id) per_cpu_ptr(vx_ptr_pc(vxi), id)->v
+#endif
+
+#define vx_cpu(vxi, v) vx_per_cpu(vxi, v, smp_processor_id())
+
+
+struct vx_info_save {
+ struct vx_info *vxi;
+ xid_t xid;
+};
+
+
+/* status flags */
+
+#define VXS_HASHED 0x0001
+#define VXS_PAUSED 0x0010
+#define VXS_SHUTDOWN 0x0100
+#define VXS_HELPER 0x1000
+#define VXS_RELEASED 0x8000
+
+
+extern void claim_vx_info(struct vx_info *, struct task_struct *);
+extern void release_vx_info(struct vx_info *, struct task_struct *);
+
+extern struct vx_info *lookup_vx_info(int);
+extern struct vx_info *lookup_or_create_vx_info(int);
+
+extern int get_xid_list(int, unsigned int *, int);
+extern int xid_is_hashed(xid_t);
+
+extern int vx_migrate_task(struct task_struct *, struct vx_info *, int);
+
+extern long vs_state_change(struct vx_info *, unsigned int);
+
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CONTEXT_H */
--- a/include/linux/vserver/cvirt_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cvirt_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,53 @@
+#ifndef _VX_CVIRT_CMD_H
+#define _VX_CVIRT_CMD_H
+
+
+/* virtual host info name commands */
+
+#define VCMD_set_vhi_name VC_CMD(VHOST, 1, 0)
+#define VCMD_get_vhi_name VC_CMD(VHOST, 2, 0)
+
+struct vcmd_vhi_name_v0 {
+ uint32_t field;
+ char name[65];
+};
+
+
+enum vhi_name_field {
+ VHIN_CONTEXT = 0,
+ VHIN_SYSNAME,
+ VHIN_NODENAME,
+ VHIN_RELEASE,
+ VHIN_VERSION,
+ VHIN_MACHINE,
+ VHIN_DOMAINNAME,
+};
+
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_set_vhi_name(struct vx_info *, void __user *);
+extern int vc_get_vhi_name(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_virt_stat VC_CMD(VSTAT, 3, 0)
+
+struct vcmd_virt_stat_v0 {
+ uint64_t offset;
+ uint64_t uptime;
+ uint32_t nr_threads;
+ uint32_t nr_running;
+ uint32_t nr_uninterruptible;
+ uint32_t nr_onhold;
+ uint32_t nr_forks;
+ uint32_t load[3];
+};
+
+#ifdef __KERNEL__
+extern int vc_virt_stat(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CVIRT_CMD_H */
--- a/include/linux/vserver/cvirt_def.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cvirt_def.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,80 @@
+#ifndef _VX_CVIRT_DEF_H
+#define _VX_CVIRT_DEF_H
+
+#include <linux/jiffies.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
+#include <linux/time.h>
+#include <asm/atomic.h>
+
+
+struct _vx_usage_stat {
+ uint64_t user;
+ uint64_t nice;
+ uint64_t system;
+ uint64_t softirq;
+ uint64_t irq;
+ uint64_t idle;
+ uint64_t iowait;
+};
+
+struct _vx_syslog {
+ wait_queue_head_t log_wait;
+ spinlock_t logbuf_lock; /* lock for the log buffer */
+
+ unsigned long log_start; /* next char to be read by syslog() */
+ unsigned long con_start; /* next char to be sent to consoles */
+ unsigned long log_end; /* most-recently-written-char + 1 */
+ unsigned long logged_chars; /* #chars since last read+clear operation */
+
+ char log_buf[1024];
+};
+
+
+/* context sub struct */
+
+struct _vx_cvirt {
+ atomic_t nr_threads; /* number of current threads */
+ atomic_t nr_running; /* number of running threads */
+ atomic_t nr_uninterruptible; /* number of uninterruptible threads */
+
+ atomic_t nr_onhold; /* processes on hold */
+ uint32_t onhold_last; /* jiffies when put on hold */
+
+ struct timeval bias_tv; /* time offset to the host */
+ struct timespec bias_idle;
+ struct timespec bias_uptime; /* context creation point */
+ uint64_t bias_clock; /* offset in clock_t */
+
+ spinlock_t load_lock; /* lock for the load averages */
+ atomic_t load_updates; /* nr of load updates done so far */
+ uint32_t load_last; /* last time load was calculated */
+ uint32_t load[3]; /* load averages 1,5,15 */
+
+ atomic_t total_forks; /* number of forks so far */
+
+ struct _vx_syslog syslog;
+};
+
+struct _vx_cvirt_pc {
+ struct _vx_usage_stat cpustat;
+};
+
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_cvirt(struct _vx_cvirt *cvirt)
+{
+ printk("\t_vx_cvirt:\n");
+ printk("\t threads: %4d, %4d, %4d, %4d\n",
+ atomic_read(&cvirt->nr_threads),
+ atomic_read(&cvirt->nr_running),
+ atomic_read(&cvirt->nr_uninterruptible),
+ atomic_read(&cvirt->nr_onhold));
+ /* add rest here */
+ printk("\t total_forks = %d\n", atomic_read(&cvirt->total_forks));
+}
+
+#endif
+
+#endif /* _VX_CVIRT_DEF_H */
--- a/include/linux/vserver/cvirt.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/cvirt.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,20 @@
+#ifndef _VX_CVIRT_H
+#define _VX_CVIRT_H
+
+
+#ifdef __KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+int vx_do_syslog(int, char __user *, int);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CVIRT_H */
--- a/include/linux/vserver/debug_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/debug_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,58 @@
+#ifndef _VX_DEBUG_CMD_H
+#define _VX_DEBUG_CMD_H
+
+
+/* debug commands */
+
+#define VCMD_dump_history VC_CMD(DEBUG, 1, 0)
+
+#define VCMD_read_history VC_CMD(DEBUG, 5, 0)
+#define VCMD_read_monitor VC_CMD(DEBUG, 6, 0)
+
+struct vcmd_read_history_v0 {
+ uint32_t index;
+ uint32_t count;
+ char __user *data;
+};
+
+struct vcmd_read_monitor_v0 {
+ uint32_t index;
+ uint32_t count;
+ char __user *data;
+};
+
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct vcmd_read_history_v0_x32 {
+ uint32_t index;
+ uint32_t count;
+ compat_uptr_t data_ptr;
+};
+
+struct vcmd_read_monitor_v0_x32 {
+ uint32_t index;
+ uint32_t count;
+ compat_uptr_t data_ptr;
+};
+
+#endif /* CONFIG_COMPAT */
+
+extern int vc_dump_history(uint32_t);
+
+extern int vc_read_history(uint32_t, void __user *);
+extern int vc_read_monitor(uint32_t, void __user *);
+
+#ifdef CONFIG_COMPAT
+
+extern int vc_read_history_x32(uint32_t, void __user *);
+extern int vc_read_monitor_x32(uint32_t, void __user *);
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* __KERNEL__ */
+#endif /* _VX_DEBUG_CMD_H */
--- a/include/linux/vserver/debug.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/debug.h 2008-04-21 13:55:14.000000000 -0400
@@ -0,0 +1,130 @@
+#ifndef _VX_DEBUG_H
+#define _VX_DEBUG_H
+
+
+#define VXD_CBIT(n, m) (vx_debug_ ## n & (1 << (m)))
+#define VXD_CMIN(n, m) (vx_debug_ ## n > (m))
+#define VXD_MASK(n, m) (vx_debug_ ## n & (m))
+
+#define VXD_DEV(d) (d), (d)->bd_inode->i_ino, \
+ imajor((d)->bd_inode), iminor((d)->bd_inode)
+#define VXF_DEV "%p[%lu,%d:%d]"
+
+
+#define __FUNC__ __func__
+
+
+#define vxd_path(p) \
+ ({ static char _buffer[PATH_MAX]; \
+ d_path(p, _buffer, sizeof(_buffer)); })
+
+#define vxd_cond_path(n) \
+ ((n) ? vxd_path(&(n)->path) : "<null>" )
+
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+extern unsigned int vx_debug_switch;
+extern unsigned int vx_debug_xid;
+extern unsigned int vx_debug_nid;
+extern unsigned int vx_debug_tag;
+extern unsigned int vx_debug_net;
+extern unsigned int vx_debug_limit;
+extern unsigned int vx_debug_cres;
+extern unsigned int vx_debug_dlim;
+extern unsigned int vx_debug_quota;
+extern unsigned int vx_debug_cvirt;
+extern unsigned int vx_debug_space;
+extern unsigned int vx_debug_misc;
+
+
+#define VX_LOGLEVEL "vxD: "
+#define VX_PROC_FMT "%p: "
+#define VX_PROCESS current
+
+#define vxdprintk(c, f, x...) \
+ do { \
+ if (c) \
+ printk(VX_LOGLEVEL VX_PROC_FMT f "\n", \
+ VX_PROCESS , ##x); \
+ } while (0)
+
+#define vxlprintk(c, f, x...) \
+ do { \
+ if (c) \
+ printk(VX_LOGLEVEL f " @%s:%d\n", x); \
+ } while (0)
+
+#define vxfprintk(c, f, x...) \
+ do { \
+ if (c) \
+ printk(VX_LOGLEVEL f " %s@%s:%d\n", x); \
+ } while (0)
+
+
+struct vx_info;
+
+void dump_vx_info(struct vx_info *, int);
+void dump_vx_info_inactive(int);
+
+#else /* CONFIG_VSERVER_DEBUG */
+
+#define vx_debug_switch 0
+#define vx_debug_xid 0
+#define vx_debug_nid 0
+#define vx_debug_tag 0
+#define vx_debug_net 0
+#define vx_debug_limit 0
+#define vx_debug_cres 0
+#define vx_debug_dlim 0
+#define vx_debug_cvirt 0
+
+#define vxdprintk(x...) do { } while (0)
+#define vxlprintk(x...) do { } while (0)
+#define vxfprintk(x...) do { } while (0)
+
+#endif /* CONFIG_VSERVER_DEBUG */
+
+
+#ifdef CONFIG_VSERVER_WARN
+
+#define VX_WARNLEVEL KERN_WARNING "vxW: "
+#define VX_WARN_TASK	"[»%s«,%u:#%u|%u|%u] "
+#define VX_WARN_XID "[xid #%u] "
+#define VX_WARN_NID "[nid #%u] "
+#define VX_WARN_TAG "[tag #%u] "
+
+#define vxwprintk(c, f, x...) \
+ do { \
+ if (c) \
+ printk(VX_WARNLEVEL f "\n", ##x); \
+ } while (0)
+
+#else /* CONFIG_VSERVER_WARN */
+
+#define vxwprintk(x...) do { } while (0)
+
+#endif /* CONFIG_VSERVER_WARN */
+
+#define vxwprintk_task(c, f, x...) \
+ vxwprintk(c, VX_WARN_TASK f, \
+ current->comm, current->pid, \
+ current->xid, current->nid, current->tag, ##x)
+#define vxwprintk_xid(c, f, x...) \
+ vxwprintk(c, VX_WARN_XID f, current->xid, x)
+#define vxwprintk_nid(c, f, x...) \
+ vxwprintk(c, VX_WARN_NID f, current->nid, x)
+#define vxwprintk_tag(c, f, x...) \
+ vxwprintk(c, VX_WARN_TAG f, current->tag, x)
+
+#ifdef CONFIG_VSERVER_DEBUG
+#define vxd_assert_lock(l) assert_spin_locked(l)
+#define vxd_assert(c, f, x...) vxlprintk(!(c), \
+ "assertion [" f "] failed.", ##x, __FILE__, __LINE__)
+#else
+#define vxd_assert_lock(l) do { } while (0)
+#define vxd_assert(c, f, x...) do { } while (0)
+#endif
+
+
+#endif /* _VX_DEBUG_H */
--- a/include/linux/vserver/device_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/device_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,44 @@
+#ifndef _VX_DEVICE_CMD_H
+#define _VX_DEVICE_CMD_H
+
+
+/* device vserver commands */
+
+#define VCMD_set_mapping VC_CMD(DEVICE, 1, 0)
+#define VCMD_unset_mapping VC_CMD(DEVICE, 2, 0)
+
+struct vcmd_set_mapping_v0 {
+ const char __user *device;
+ const char __user *target;
+ uint32_t flags;
+};
+
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct vcmd_set_mapping_v0_x32 {
+ compat_uptr_t device_ptr;
+ compat_uptr_t target_ptr;
+ uint32_t flags;
+};
+
+#endif /* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_set_mapping(struct vx_info *, void __user *);
+extern int vc_unset_mapping(struct vx_info *, void __user *);
+
+#ifdef CONFIG_COMPAT
+
+extern int vc_set_mapping_x32(struct vx_info *, void __user *);
+extern int vc_unset_mapping_x32(struct vx_info *, void __user *);
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* __KERNEL__ */
+#endif /* _VX_DEVICE_CMD_H */
--- a/include/linux/vserver/device_def.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/device_def.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,17 @@
+#ifndef _VX_DEVICE_DEF_H
+#define _VX_DEVICE_DEF_H
+
+#include <linux/types.h>
+
+struct vx_dmap_target {
+ dev_t target;
+ uint32_t flags;
+};
+
+struct _vx_device {
+#ifdef CONFIG_VSERVER_DEVICE
+ struct vx_dmap_target targets[2];
+#endif
+};
+
+#endif /* _VX_DEVICE_DEF_H */
--- a/include/linux/vserver/device.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/device.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,15 @@
+#ifndef _VX_DEVICE_H
+#define _VX_DEVICE_H
+
+
+#define DATTR_CREATE 0x00000001
+#define DATTR_OPEN 0x00000002
+
+#define DATTR_REMAP 0x00000010
+
+#define DATTR_MASK 0x00000013
+
+
+#else /* _VX_DEVICE_H */
+#warning duplicate inclusion
+#endif /* _VX_DEVICE_H */
--- a/include/linux/vserver/dlimit_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/dlimit_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,74 @@
+#ifndef _VX_DLIMIT_CMD_H
+#define _VX_DLIMIT_CMD_H
+
+
+/* dlimit vserver commands */
+
+#define VCMD_add_dlimit VC_CMD(DLIMIT, 1, 0)
+#define VCMD_rem_dlimit VC_CMD(DLIMIT, 2, 0)
+
+#define VCMD_set_dlimit VC_CMD(DLIMIT, 5, 0)
+#define VCMD_get_dlimit VC_CMD(DLIMIT, 6, 0)
+
+struct vcmd_ctx_dlimit_base_v0 {
+ const char __user *name;
+ uint32_t flags;
+};
+
+struct vcmd_ctx_dlimit_v0 {
+ const char __user *name;
+ uint32_t space_used; /* used space in kbytes */
+ uint32_t space_total; /* maximum space in kbytes */
+ uint32_t inodes_used; /* used inodes */
+ uint32_t inodes_total; /* maximum inodes */
+ uint32_t reserved; /* reserved for root in % */
+ uint32_t flags;
+};
+
+#define CDLIM_UNSET ((uint32_t)0UL)
+#define CDLIM_INFINITY ((uint32_t)~0UL)
+#define CDLIM_KEEP ((uint32_t)~1UL)
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct vcmd_ctx_dlimit_base_v0_x32 {
+ compat_uptr_t name_ptr;
+ uint32_t flags;
+};
+
+struct vcmd_ctx_dlimit_v0_x32 {
+ compat_uptr_t name_ptr;
+ uint32_t space_used; /* used space in kbytes */
+ uint32_t space_total; /* maximum space in kbytes */
+ uint32_t inodes_used; /* used inodes */
+ uint32_t inodes_total; /* maximum inodes */
+ uint32_t reserved; /* reserved for root in % */
+ uint32_t flags;
+};
+
+#endif /* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_add_dlimit(uint32_t, void __user *);
+extern int vc_rem_dlimit(uint32_t, void __user *);
+
+extern int vc_set_dlimit(uint32_t, void __user *);
+extern int vc_get_dlimit(uint32_t, void __user *);
+
+#ifdef CONFIG_COMPAT
+
+extern int vc_add_dlimit_x32(uint32_t, void __user *);
+extern int vc_rem_dlimit_x32(uint32_t, void __user *);
+
+extern int vc_set_dlimit_x32(uint32_t, void __user *);
+extern int vc_get_dlimit_x32(uint32_t, void __user *);
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* __KERNEL__ */
+#endif /* _VX_DLIMIT_CMD_H */
--- a/include/linux/vserver/dlimit.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/dlimit.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,54 @@
+#ifndef _VX_DLIMIT_H
+#define _VX_DLIMIT_H
+
+#include "switch.h"
+
+
+#ifdef __KERNEL__
+
+/* keep in sync with CDLIM_INFINITY */
+
+#define DLIM_INFINITY (~0ULL)
+
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+
+struct super_block;
+
+struct dl_info {
+ struct hlist_node dl_hlist; /* linked list of contexts */
+ struct rcu_head dl_rcu; /* the rcu head */
+ tag_t dl_tag; /* context tag */
+ atomic_t dl_usecnt; /* usage count */
+ atomic_t dl_refcnt; /* reference count */
+
+ struct super_block *dl_sb; /* associated superblock */
+
+ spinlock_t dl_lock; /* protect the values */
+
+ unsigned long long dl_space_used; /* used space in bytes */
+ unsigned long long dl_space_total; /* maximum space in bytes */
+ unsigned long dl_inodes_used; /* used inodes */
+ unsigned long dl_inodes_total; /* maximum inodes */
+
+ unsigned int dl_nrlmult; /* non root limit mult */
+};
+
+struct rcu_head;
+
+extern void rcu_free_dl_info(struct rcu_head *);
+extern void unhash_dl_info(struct dl_info *);
+
+extern struct dl_info *locate_dl_info(struct super_block *, tag_t);
+
+
+struct kstatfs;
+
+extern void vx_vsi_statfs(struct super_block *, struct kstatfs *);
+
+typedef uint64_t dlsize_t;
+
+#endif /* __KERNEL__ */
+#else /* _VX_DLIMIT_H */
+#warning duplicate inclusion
+#endif /* _VX_DLIMIT_H */
--- a/include/linux/vserver/global.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/global.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,20 @@
+#ifndef _VX_GLOBAL_H
+#define _VX_GLOBAL_H
+
+
+extern atomic_t vx_global_ctotal;
+extern atomic_t vx_global_cactive;
+
+extern atomic_t nx_global_ctotal;
+extern atomic_t nx_global_cactive;
+
+extern atomic_t vs_global_nsproxy;
+extern atomic_t vs_global_fs;
+extern atomic_t vs_global_mnt_ns;
+extern atomic_t vs_global_uts_ns;
+extern atomic_t vs_global_ipc_ns;
+extern atomic_t vs_global_user_ns;
+extern atomic_t vs_global_pid_ns;
+
+
+#endif /* _VX_GLOBAL_H */
--- a/include/linux/vserver/history.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/history.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,197 @@
+#ifndef _VX_HISTORY_H
+#define _VX_HISTORY_H
+
+
+enum {
+ VXH_UNUSED = 0,
+ VXH_THROW_OOPS = 1,
+
+ VXH_GET_VX_INFO,
+ VXH_PUT_VX_INFO,
+ VXH_INIT_VX_INFO,
+ VXH_SET_VX_INFO,
+ VXH_CLR_VX_INFO,
+ VXH_CLAIM_VX_INFO,
+ VXH_RELEASE_VX_INFO,
+ VXH_ALLOC_VX_INFO,
+ VXH_DEALLOC_VX_INFO,
+ VXH_HASH_VX_INFO,
+ VXH_UNHASH_VX_INFO,
+ VXH_LOC_VX_INFO,
+ VXH_LOOKUP_VX_INFO,
+ VXH_CREATE_VX_INFO,
+};
+
+struct _vxhe_vxi {
+ struct vx_info *ptr;
+ unsigned xid;
+ unsigned usecnt;
+ unsigned tasks;
+};
+
+struct _vxhe_set_clr {
+ void *data;
+};
+
+struct _vxhe_loc_lookup {
+ unsigned arg;
+};
+
+struct _vx_hist_entry {
+ void *loc;
+ unsigned short seq;
+ unsigned short type;
+ struct _vxhe_vxi vxi;
+ union {
+ struct _vxhe_set_clr sc;
+ struct _vxhe_loc_lookup ll;
+ };
+};
+
+#ifdef CONFIG_VSERVER_HISTORY
+
+extern unsigned volatile int vxh_active;
+
+struct _vx_hist_entry *vxh_advance(void *loc);
+
+
+static inline
+void __vxh_copy_vxi(struct _vx_hist_entry *entry, struct vx_info *vxi)
+{
+ entry->vxi.ptr = vxi;
+ if (vxi) {
+ entry->vxi.usecnt = atomic_read(&vxi->vx_usecnt);
+ entry->vxi.tasks = atomic_read(&vxi->vx_tasks);
+ entry->vxi.xid = vxi->vx_id;
+ }
+}
+
+
+#define __HERE__ current_text_addr()
+
+#define __VXH_BODY(__type, __data, __here) \
+ struct _vx_hist_entry *entry; \
+ \
+ preempt_disable(); \
+ entry = vxh_advance(__here); \
+ __data; \
+ entry->type = __type; \
+ preempt_enable();
+
+
+ /* pass vxi only */
+
+#define __VXH_SMPL \
+ __vxh_copy_vxi(entry, vxi)
+
+static inline
+void __vxh_smpl(struct vx_info *vxi, int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_SMPL, __here)
+}
+
+ /* pass vxi and data (void *) */
+
+#define __VXH_DATA \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->sc.data = data
+
+static inline
+void __vxh_data(struct vx_info *vxi, void *data,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_DATA, __here)
+}
+
+ /* pass vxi and arg (long) */
+
+#define __VXH_LONG \
+ __vxh_copy_vxi(entry, vxi); \
+ entry->ll.arg = arg
+
+static inline
+void __vxh_long(struct vx_info *vxi, long arg,
+ int __type, void *__here)
+{
+ __VXH_BODY(__type, __VXH_LONG, __here)
+}
+
+
+static inline
+void __vxh_throw_oops(void *__here)
+{
+ __VXH_BODY(VXH_THROW_OOPS, {}, __here);
+ /* prevent further acquisition */
+ vxh_active = 0;
+}
+
+
+#define vxh_throw_oops() __vxh_throw_oops(__HERE__);
+
+#define __vxh_get_vx_info(v, h) __vxh_smpl(v, VXH_GET_VX_INFO, h);
+#define __vxh_put_vx_info(v, h) __vxh_smpl(v, VXH_PUT_VX_INFO, h);
+
+#define __vxh_init_vx_info(v, d, h) \
+ __vxh_data(v, d, VXH_INIT_VX_INFO, h);
+#define __vxh_set_vx_info(v, d, h) \
+ __vxh_data(v, d, VXH_SET_VX_INFO, h);
+#define __vxh_clr_vx_info(v, d, h) \
+ __vxh_data(v, d, VXH_CLR_VX_INFO, h);
+
+#define __vxh_claim_vx_info(v, d, h) \
+ __vxh_data(v, d, VXH_CLAIM_VX_INFO, h);
+#define __vxh_release_vx_info(v, d, h) \
+ __vxh_data(v, d, VXH_RELEASE_VX_INFO, h);
+
+#define vxh_alloc_vx_info(v) \
+ __vxh_smpl(v, VXH_ALLOC_VX_INFO, __HERE__);
+#define vxh_dealloc_vx_info(v) \
+ __vxh_smpl(v, VXH_DEALLOC_VX_INFO, __HERE__);
+
+#define vxh_hash_vx_info(v) \
+ __vxh_smpl(v, VXH_HASH_VX_INFO, __HERE__);
+#define vxh_unhash_vx_info(v) \
+ __vxh_smpl(v, VXH_UNHASH_VX_INFO, __HERE__);
+
+#define vxh_loc_vx_info(v, l) \
+ __vxh_long(v, l, VXH_LOC_VX_INFO, __HERE__);
+#define vxh_lookup_vx_info(v, l) \
+ __vxh_long(v, l, VXH_LOOKUP_VX_INFO, __HERE__);
+#define vxh_create_vx_info(v, l) \
+ __vxh_long(v, l, VXH_CREATE_VX_INFO, __HERE__);
+
+extern void vxh_dump_history(void);
+
+
+#else /* CONFIG_VSERVER_HISTORY */
+
+#define __HERE__ 0
+
+#define vxh_throw_oops() do { } while (0)
+
+#define __vxh_get_vx_info(v, h) do { } while (0)
+#define __vxh_put_vx_info(v, h) do { } while (0)
+
+#define __vxh_init_vx_info(v, d, h) do { } while (0)
+#define __vxh_set_vx_info(v, d, h) do { } while (0)
+#define __vxh_clr_vx_info(v, d, h) do { } while (0)
+
+#define __vxh_claim_vx_info(v, d, h) do { } while (0)
+#define __vxh_release_vx_info(v, d, h) do { } while (0)
+
+#define vxh_alloc_vx_info(v) do { } while (0)
+#define vxh_dealloc_vx_info(v) do { } while (0)
+
+#define vxh_hash_vx_info(v) do { } while (0)
+#define vxh_unhash_vx_info(v) do { } while (0)
+
+#define vxh_loc_vx_info(v, l) do { } while (0)
+#define vxh_lookup_vx_info(v, l) do { } while (0)
+#define vxh_create_vx_info(v, l) do { } while (0)
+
+#define vxh_dump_history() do { } while (0)
+
+
+#endif /* CONFIG_VSERVER_HISTORY */
+
+#endif /* _VX_HISTORY_H */
--- a/include/linux/vserver/inode_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/inode_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,59 @@
+#ifndef _VX_INODE_CMD_H
+#define _VX_INODE_CMD_H
+
+
+/* inode vserver commands */
+
+#define VCMD_get_iattr VC_CMD(INODE, 1, 1)
+#define VCMD_set_iattr VC_CMD(INODE, 2, 1)
+
+#define VCMD_fget_iattr VC_CMD(INODE, 3, 0)
+#define VCMD_fset_iattr VC_CMD(INODE, 4, 0)
+
+struct vcmd_ctx_iattr_v1 {
+ const char __user *name;
+ uint32_t tag;
+ uint32_t flags;
+ uint32_t mask;
+};
+
+struct vcmd_ctx_fiattr_v0 {
+ uint32_t tag;
+ uint32_t flags;
+ uint32_t mask;
+};
+
+
+#ifdef __KERNEL__
+
+
+#ifdef CONFIG_COMPAT
+
+#include <asm/compat.h>
+
+struct vcmd_ctx_iattr_v1_x32 {
+ compat_uptr_t name_ptr;
+ uint32_t tag;
+ uint32_t flags;
+ uint32_t mask;
+};
+
+#endif /* CONFIG_COMPAT */
+
+#include <linux/compiler.h>
+
+extern int vc_get_iattr(void __user *);
+extern int vc_set_iattr(void __user *);
+
+extern int vc_fget_iattr(uint32_t, void __user *);
+extern int vc_fset_iattr(uint32_t, void __user *);
+
+#ifdef CONFIG_COMPAT
+
+extern int vc_get_iattr_x32(void __user *);
+extern int vc_set_iattr_x32(void __user *);
+
+#endif /* CONFIG_COMPAT */
+
+#endif /* __KERNEL__ */
+#endif /* _VX_INODE_CMD_H */
--- a/include/linux/vserver/inode.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/inode.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,38 @@
+#ifndef _VX_INODE_H
+#define _VX_INODE_H
+
+
+#define IATTR_TAG 0x01000000
+
+#define IATTR_ADMIN 0x00000001
+#define IATTR_WATCH 0x00000002
+#define IATTR_HIDE 0x00000004
+#define IATTR_FLAGS 0x00000007
+
+#define IATTR_BARRIER 0x00010000
+#define IATTR_IUNLINK 0x00020000
+#define IATTR_IMMUTABLE 0x00040000
+
+#ifdef __KERNEL__
+
+
+#ifdef CONFIG_VSERVER_PROC_SECURE
+#define IATTR_PROC_DEFAULT ( IATTR_ADMIN | IATTR_HIDE )
+#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
+#else
+#define IATTR_PROC_DEFAULT ( IATTR_ADMIN )
+#define IATTR_PROC_SYMLINK ( IATTR_ADMIN )
+#endif
+
+#define vx_hide_check(c, m) (((m) & IATTR_HIDE) ? vx_check(c, m) : 1)
+
+#endif /* __KERNEL__ */
+
+/* inode ioctls */
+
+#define FIOC_GETXFLG _IOR('x', 5, long)
+#define FIOC_SETXFLG _IOW('x', 6, long)
+
+#else /* _VX_INODE_H */
+#warning duplicate inclusion
+#endif /* _VX_INODE_H */
--- a/include/linux/vserver/Kbuild 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/Kbuild 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,8 @@
+
+unifdef-y += context_cmd.h network_cmd.h space_cmd.h \
+ cacct_cmd.h cvirt_cmd.h limit_cmd.h dlimit_cmd.h \
+ inode_cmd.h tag_cmd.h sched_cmd.h signal_cmd.h \
+ debug_cmd.h device_cmd.h
+
+unifdef-y += switch.h network.h monitor.h inode.h device.h
+
--- a/include/linux/vserver/limit_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/limit_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,69 @@
+#ifndef _VX_LIMIT_CMD_H
+#define _VX_LIMIT_CMD_H
+
+
+/* rlimit vserver commands */
+
+#define VCMD_get_rlimit VC_CMD(RLIMIT, 1, 0)
+#define VCMD_set_rlimit VC_CMD(RLIMIT, 2, 0)
+#define VCMD_get_rlimit_mask VC_CMD(RLIMIT, 3, 0)
+#define VCMD_reset_minmax VC_CMD(RLIMIT, 9, 0)
+
+struct vcmd_ctx_rlimit_v0 {
+ uint32_t id;
+ uint64_t minimum;
+ uint64_t softlimit;
+ uint64_t maximum;
+};
+
+struct vcmd_ctx_rlimit_mask_v0 {
+ uint32_t minimum;
+ uint32_t softlimit;
+ uint32_t maximum;
+};
+
+#define VCMD_rlimit_stat VC_CMD(VSTAT, 1, 0)
+
+struct vcmd_rlimit_stat_v0 {
+ uint32_t id;
+ uint32_t hits;
+ uint64_t value;
+ uint64_t minimum;
+ uint64_t maximum;
+};
+
+#define CRLIM_UNSET (0ULL)
+#define CRLIM_INFINITY (~0ULL)
+#define CRLIM_KEEP (~1ULL)
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_IA32_EMULATION
+
+struct vcmd_ctx_rlimit_v0_x32 {
+ uint32_t id;
+ uint64_t minimum;
+ uint64_t softlimit;
+ uint64_t maximum;
+} __attribute__ ((aligned (4)));
+
+#endif /* CONFIG_IA32_EMULATION */
+
+#include <linux/compiler.h>
+
+extern int vc_get_rlimit_mask(uint32_t, void __user *);
+extern int vc_get_rlimit(struct vx_info *, void __user *);
+extern int vc_set_rlimit(struct vx_info *, void __user *);
+extern int vc_reset_minmax(struct vx_info *, void __user *);
+
+extern int vc_rlimit_stat(struct vx_info *, void __user *);
+
+#ifdef CONFIG_IA32_EMULATION
+
+extern int vc_get_rlimit_x32(struct vx_info *, void __user *);
+extern int vc_set_rlimit_x32(struct vx_info *, void __user *);
+
+#endif /* CONFIG_IA32_EMULATION */
+
+#endif /* __KERNEL__ */
+#endif /* _VX_LIMIT_CMD_H */
--- a/include/linux/vserver/limit_def.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/limit_def.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,47 @@
+#ifndef _VX_LIMIT_DEF_H
+#define _VX_LIMIT_DEF_H
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#include "limit.h"
+
+
+struct _vx_res_limit {
+ rlim_t soft; /* Context soft limit */
+ rlim_t hard; /* Context hard limit */
+
+ rlim_atomic_t rcur; /* Current value */
+ rlim_t rmin; /* Context minimum */
+ rlim_t rmax; /* Context maximum */
+
+ atomic_t lhit; /* Limit hits */
+};
+
+/* context sub struct */
+
+struct _vx_limit {
+ struct _vx_res_limit res[NUM_LIMITS];
+};
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_limit(struct _vx_limit *limit)
+{
+ int i;
+
+ printk("\t_vx_limit:");
+ for (i = 0; i < NUM_LIMITS; i++) {
+ printk("\t [%2d] = %8lu %8lu/%8lu, %8ld/%8ld, %8d\n",
+ i, (unsigned long)__rlim_get(limit, i),
+ (unsigned long)__rlim_rmin(limit, i),
+ (unsigned long)__rlim_rmax(limit, i),
+ (long)__rlim_soft(limit, i),
+ (long)__rlim_hard(limit, i),
+ atomic_read(&__rlim_lhit(limit, i)));
+ }
+}
+
+#endif
+
+#endif /* _VX_LIMIT_DEF_H */
--- a/include/linux/vserver/limit.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/limit.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,70 @@
+#ifndef _VX_LIMIT_H
+#define _VX_LIMIT_H
+
+#define VLIMIT_NSOCK 16
+#define VLIMIT_OPENFD 17
+#define VLIMIT_ANON 18
+#define VLIMIT_SHMEM 19
+#define VLIMIT_SEMARY 20
+#define VLIMIT_NSEMS 21
+#define VLIMIT_DENTRY 22
+#define VLIMIT_MAPPED 23
+
+
+#ifdef __KERNEL__
+
+#define VLIM_NOCHECK ((1L << VLIMIT_DENTRY) | (1L << RLIMIT_RSS))
+
+/* keep in sync with CRLIM_INFINITY */
+
+#define VLIM_INFINITY (~0ULL)
+
+#include <asm/atomic.h>
+#include <asm/resource.h>
+
+#ifndef RLIM_INFINITY
+#warning RLIM_INFINITY is undefined
+#endif
+
+#define __rlim_val(l, r, v) ((l)->res[r].v)
+
+#define __rlim_soft(l, r) __rlim_val(l, r, soft)
+#define __rlim_hard(l, r) __rlim_val(l, r, hard)
+
+#define __rlim_rcur(l, r) __rlim_val(l, r, rcur)
+#define __rlim_rmin(l, r) __rlim_val(l, r, rmin)
+#define __rlim_rmax(l, r) __rlim_val(l, r, rmax)
+
+#define __rlim_lhit(l, r) __rlim_val(l, r, lhit)
+#define __rlim_hit(l, r) atomic_inc(&__rlim_lhit(l, r))
+
+typedef atomic_long_t rlim_atomic_t;
+typedef unsigned long rlim_t;
+
+#define __rlim_get(l, r) atomic_long_read(&__rlim_rcur(l, r))
+#define __rlim_set(l, r, v) atomic_long_set(&__rlim_rcur(l, r), v)
+#define __rlim_inc(l, r) atomic_long_inc(&__rlim_rcur(l, r))
+#define __rlim_dec(l, r) atomic_long_dec(&__rlim_rcur(l, r))
+#define __rlim_add(l, r, v) atomic_long_add(v, &__rlim_rcur(l, r))
+#define __rlim_sub(l, r, v) atomic_long_sub(v, &__rlim_rcur(l, r))
+
+
+#if (RLIM_INFINITY == VLIM_INFINITY)
+#define VX_VLIM(r) ((long long)(long)(r))
+#define VX_RLIM(v) ((rlim_t)(v))
+#else
+#define VX_VLIM(r) (((r) == RLIM_INFINITY) \
+ ? VLIM_INFINITY : (long long)(r))
+#define VX_RLIM(v) (((v) == VLIM_INFINITY) \
+ ? RLIM_INFINITY : (rlim_t)(v))
+#endif
+
+struct sysinfo;
+
+void vx_vsi_meminfo(struct sysinfo *);
+void vx_vsi_swapinfo(struct sysinfo *);
+
+#define NUM_LIMITS 24
+
+#endif /* __KERNEL__ */
+#endif /* _VX_LIMIT_H */
--- a/include/linux/vserver/limit_int.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/limit_int.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,198 @@
+#ifndef _VX_LIMIT_INT_H
+#define _VX_LIMIT_INT_H
+
+#include "context.h"
+
+#ifdef __KERNEL__
+
+#define VXD_RCRES_COND(r) VXD_CBIT(cres, r)
+#define VXD_RLIMIT_COND(r) VXD_CBIT(limit, r)
+
+extern const char *vlimit_name[NUM_LIMITS];
+
+static inline void __vx_acc_cres(struct vx_info *vxi,
+ int res, int dir, void *_data, char *_file, int _line)
+{
+ if (VXD_RCRES_COND(res))
+ vxlprintk(1, "vx_acc_cres[%5d,%s,%2d]: %5ld%s (%p)",
+ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+ (dir > 0) ? "++" : "--", _data, _file, _line);
+ if (!vxi)
+ return;
+
+ if (dir > 0)
+ __rlim_inc(&vxi->limit, res);
+ else
+ __rlim_dec(&vxi->limit, res);
+}
+
+static inline void __vx_add_cres(struct vx_info *vxi,
+ int res, int amount, void *_data, char *_file, int _line)
+{
+ if (VXD_RCRES_COND(res))
+ vxlprintk(1, "vx_add_cres[%5d,%s,%2d]: %5ld += %5d (%p)",
+ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+ amount, _data, _file, _line);
+ if (amount == 0)
+ return;
+ if (!vxi)
+ return;
+ __rlim_add(&vxi->limit, res, amount);
+}
+
+static inline
+int __vx_cres_adjust_max(struct _vx_limit *limit, int res, rlim_t value)
+{
+ int cond = (value > __rlim_rmax(limit, res));
+
+ if (cond)
+ __rlim_rmax(limit, res) = value;
+ return cond;
+}
+
+static inline
+int __vx_cres_adjust_min(struct _vx_limit *limit, int res, rlim_t value)
+{
+ int cond = (value < __rlim_rmin(limit, res));
+
+ if (cond)
+ __rlim_rmin(limit, res) = value;
+ return cond;
+}
+
+static inline
+void __vx_cres_fixup(struct _vx_limit *limit, int res, rlim_t value)
+{
+ if (!__vx_cres_adjust_max(limit, res, value))
+ __vx_cres_adjust_min(limit, res, value);
+}
+
+
+/* return values:
+ +1 ... no limit hit
+ -1 ... over soft limit
+ 0 ... over hard limit */
+
+static inline int __vx_cres_avail(struct vx_info *vxi,
+ int res, int num, char *_file, int _line)
+{
+ struct _vx_limit *limit;
+ rlim_t value;
+
+ if (VXD_RLIMIT_COND(res))
+ vxlprintk(1, "vx_cres_avail[%5d,%s,%2d]: %5ld/%5ld > %5ld + %5d",
+ (vxi ? vxi->vx_id : -1), vlimit_name[res], res,
+ (vxi ? (long)__rlim_soft(&vxi->limit, res) : -1),
+ (vxi ? (long)__rlim_hard(&vxi->limit, res) : -1),
+ (vxi ? (long)__rlim_get(&vxi->limit, res) : 0),
+ num, _file, _line);
+ if (!vxi)
+ return 1;
+
+ limit = &vxi->limit;
+ value = __rlim_get(limit, res);
+
+ if (!__vx_cres_adjust_max(limit, res, value))
+ __vx_cres_adjust_min(limit, res, value);
+
+ if (num == 0)
+ return 1;
+
+ if (__rlim_soft(limit, res) == RLIM_INFINITY)
+ return -1;
+ if (value + num <= __rlim_soft(limit, res))
+ return -1;
+
+ if (__rlim_hard(limit, res) == RLIM_INFINITY)
+ return 1;
+ if (value + num <= __rlim_hard(limit, res))
+ return 1;
+
+ __rlim_hit(limit, res);
+ return 0;
+}
+
+
+static const int VLA_RSS[] = { RLIMIT_RSS, VLIMIT_ANON, VLIMIT_MAPPED, 0 };
+
+static inline
+rlim_t __vx_cres_array_sum(struct _vx_limit *limit, const int *array)
+{
+ rlim_t value, sum = 0;
+ int res;
+
+ while ((res = *array++)) {
+ value = __rlim_get(limit, res);
+ __vx_cres_fixup(limit, res, value);
+ sum += value;
+ }
+ return sum;
+}
+
+static inline
+rlim_t __vx_cres_array_fixup(struct _vx_limit *limit, const int *array)
+{
+ rlim_t value = __vx_cres_array_sum(limit, array + 1);
+ int res = *array;
+
+ if (value == __rlim_get(limit, res))
+ return value;
+
+ __rlim_set(limit, res, value);
+ /* now adjust min/max */
+ if (!__vx_cres_adjust_max(limit, res, value))
+ __vx_cres_adjust_min(limit, res, value);
+
+ return value;
+}
+
+static inline int __vx_cres_array_avail(struct vx_info *vxi,
+ const int *array, int num, char *_file, int _line)
+{
+ struct _vx_limit *limit;
+ rlim_t value = 0;
+ int res;
+
+ if (num == 0)
+ return 1;
+ if (!vxi)
+ return 1;
+
+ limit = &vxi->limit;
+ res = *array;
+ value = __vx_cres_array_sum(limit, array + 1);
+
+ __rlim_set(limit, res, value);
+ __vx_cres_fixup(limit, res, value);
+
+ return __vx_cres_avail(vxi, res, num, _file, _line);
+}
+
+
+static inline void vx_limit_fixup(struct _vx_limit *limit, int id)
+{
+ rlim_t value;
+ int res;
+
+ /* complex resources first */
+ if ((id < 0) || (id == RLIMIT_RSS))
+ __vx_cres_array_fixup(limit, VLA_RSS);
+
+ for (res = 0; res < NUM_LIMITS; res++) {
+ if ((id > 0) && (res != id))
+ continue;
+
+ value = __rlim_get(limit, res);
+ __vx_cres_fixup(limit, res, value);
+
+ /* not supposed to happen, maybe warn? */
+ if (__rlim_rmax(limit, res) > __rlim_hard(limit, res))
+ __rlim_rmax(limit, res) = __rlim_hard(limit, res);
+ }
+}
+
+
+#endif /* __KERNEL__ */
+#endif /* _VX_LIMIT_INT_H */
--- a/include/linux/vserver/monitor.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/monitor.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,96 @@
+#ifndef _VX_MONITOR_H
+#define _VX_MONITOR_H
+
+#include <linux/types.h>
+
+enum {
+ VXM_UNUSED = 0,
+
+ VXM_SYNC = 0x10,
+
+ VXM_UPDATE = 0x20,
+ VXM_UPDATE_1,
+ VXM_UPDATE_2,
+
+ VXM_RQINFO_1 = 0x24,
+ VXM_RQINFO_2,
+
+ VXM_ACTIVATE = 0x40,
+ VXM_DEACTIVATE,
+ VXM_IDLE,
+
+ VXM_HOLD = 0x44,
+ VXM_UNHOLD,
+
+ VXM_MIGRATE = 0x48,
+ VXM_RESCHED,
+
+ /* all other bits are flags */
+ VXM_SCHED = 0x80,
+};
+
+struct _vxm_update_1 {
+ uint32_t tokens_max;
+ uint32_t fill_rate;
+ uint32_t interval;
+};
+
+struct _vxm_update_2 {
+ uint32_t tokens_min;
+ uint32_t fill_rate;
+ uint32_t interval;
+};
+
+struct _vxm_rqinfo_1 {
+ uint16_t running;
+ uint16_t onhold;
+ uint16_t iowait;
+ uint16_t uintr;
+ uint32_t idle_tokens;
+};
+
+struct _vxm_rqinfo_2 {
+ uint32_t norm_time;
+ uint32_t idle_time;
+ uint32_t idle_skip;
+};
+
+struct _vxm_sched {
+ uint32_t tokens;
+ uint32_t norm_time;
+ uint32_t idle_time;
+};
+
+struct _vxm_task {
+ uint16_t pid;
+ uint16_t state;
+};
+
+struct _vxm_event {
+ uint32_t jif;
+ union {
+ uint32_t seq;
+ uint32_t sec;
+ };
+ union {
+ uint32_t tokens;
+ uint32_t nsec;
+ struct _vxm_task tsk;
+ };
+};
+
+struct _vx_mon_entry {
+ uint16_t type;
+ uint16_t xid;
+ union {
+ struct _vxm_event ev;
+ struct _vxm_sched sd;
+ struct _vxm_update_1 u1;
+ struct _vxm_update_2 u2;
+ struct _vxm_rqinfo_1 q1;
+ struct _vxm_rqinfo_2 q2;
+ };
+};
+
+
+#endif /* _VX_MONITOR_H */
--- a/include/linux/vserver/network_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/network_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,150 @@
+#ifndef _VX_NETWORK_CMD_H
+#define _VX_NETWORK_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_nid VC_CMD(VINFO, 2, 0)
+
+#ifdef __KERNEL__
+extern int vc_task_nid(uint32_t);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_nx_info VC_CMD(VINFO, 6, 0)
+
+struct vcmd_nx_info_v0 {
+ uint32_t nid;
+ /* more to come */
+};
+
+#ifdef __KERNEL__
+extern int vc_nx_info(struct nx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define VCMD_net_create_v0 VC_CMD(VNET, 1, 0)
+#define VCMD_net_create VC_CMD(VNET, 1, 1)
+
+struct vcmd_net_create {
+ uint64_t flagword;
+};
+
+#define VCMD_net_migrate VC_CMD(NETMIG, 1, 0)
+
+#define VCMD_net_add VC_CMD(NETALT, 1, 0)
+#define VCMD_net_remove VC_CMD(NETALT, 2, 0)
+
+struct vcmd_net_addr_v0 {
+ uint16_t type;
+ uint16_t count;
+ struct in_addr ip[4];
+ struct in_addr mask[4];
+};
+
+#define VCMD_net_add_ipv4 VC_CMD(NETALT, 1, 1)
+#define VCMD_net_remove_ipv4 VC_CMD(NETALT, 2, 1)
+
+struct vcmd_net_addr_ipv4_v1 {
+ uint16_t type;
+ uint16_t flags;
+ struct in_addr ip;
+ struct in_addr mask;
+};
+
+#define VCMD_net_add_ipv6 VC_CMD(NETALT, 3, 1)
+#define VCMD_net_remove_ipv6 VC_CMD(NETALT, 4, 1)
+
+struct vcmd_net_addr_ipv6_v1 {
+ uint16_t type;
+ uint16_t flags;
+ uint32_t prefix;
+ struct in6_addr ip;
+ struct in6_addr mask;
+};
+
+#define VCMD_add_match_ipv4 VC_CMD(NETALT, 5, 0)
+#define VCMD_get_match_ipv4 VC_CMD(NETALT, 6, 0)
+
+struct vcmd_match_ipv4_v0 {
+ uint16_t type;
+ uint16_t flags;
+ uint16_t parent;
+ uint16_t prefix;
+ struct in_addr ip;
+ struct in_addr ip2;
+ struct in_addr mask;
+};
+
+#define VCMD_add_match_ipv6 VC_CMD(NETALT, 7, 0)
+#define VCMD_get_match_ipv6 VC_CMD(NETALT, 8, 0)
+
+struct vcmd_match_ipv6_v0 {
+ uint16_t type;
+ uint16_t flags;
+ uint16_t parent;
+ uint16_t prefix;
+ struct in6_addr ip;
+ struct in6_addr ip2;
+ struct in6_addr mask;
+};
+
+
+#ifdef __KERNEL__
+extern int vc_net_create(uint32_t, void __user *);
+extern int vc_net_migrate(struct nx_info *, void __user *);
+
+extern int vc_net_add(struct nx_info *, void __user *);
+extern int vc_net_remove(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv4(struct nx_info *, void __user *);
+extern int vc_net_remove_ipv4(struct nx_info *, void __user *);
+
+extern int vc_net_add_ipv6(struct nx_info *, void __user *);
+extern int vc_net_remove_ipv6(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv4(struct nx_info *, void __user *);
+extern int vc_get_match_ipv4(struct nx_info *, void __user *);
+
+extern int vc_add_match_ipv6(struct nx_info *, void __user *);
+extern int vc_get_match_ipv6(struct nx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* flag commands */
+
+#define VCMD_get_nflags VC_CMD(FLAGS, 5, 0)
+#define VCMD_set_nflags VC_CMD(FLAGS, 6, 0)
+
+struct vcmd_net_flags_v0 {
+ uint64_t flagword;
+ uint64_t mask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_nflags(struct nx_info *, void __user *);
+extern int vc_set_nflags(struct nx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+
+/* network caps commands */
+
+#define VCMD_get_ncaps VC_CMD(FLAGS, 7, 0)
+#define VCMD_set_ncaps VC_CMD(FLAGS, 8, 0)
+
+struct vcmd_net_caps_v0 {
+ uint64_t ncaps;
+ uint64_t cmask;
+};
+
+#ifdef __KERNEL__
+extern int vc_get_ncaps(struct nx_info *, void __user *);
+extern int vc_set_ncaps(struct nx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_CONTEXT_CMD_H */
--- a/include/linux/vserver/network.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/network.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,146 @@
+#ifndef _VX_NETWORK_H
+#define _VX_NETWORK_H
+
+#include <linux/types.h>
+
+
+#define MAX_N_CONTEXT 65535 /* Arbitrary limit */
+
+
+/* network flags */
+
+#define NXF_INFO_PRIVATE 0x00000008
+
+#define NXF_SINGLE_IP 0x00000100
+#define NXF_LBACK_REMAP 0x00000200
+#define NXF_LBACK_ALLOW 0x00000400
+
+#define NXF_HIDE_NETIF 0x02000000
+#define NXF_HIDE_LBACK 0x04000000
+
+#define NXF_STATE_SETUP (1ULL << 32)
+#define NXF_STATE_ADMIN (1ULL << 34)
+
+#define NXF_SC_HELPER (1ULL << 36)
+#define NXF_PERSISTENT (1ULL << 38)
+
+#define NXF_ONE_TIME (0x0005ULL << 32)
+
+
+#define NXF_INIT_SET (__nxf_init_set())
+
+static inline uint64_t __nxf_init_set(void) {
+ return NXF_STATE_ADMIN
+#ifdef CONFIG_VSERVER_AUTO_LBACK
+ | NXF_LBACK_REMAP
+ | NXF_HIDE_LBACK
+#endif
+#ifdef CONFIG_VSERVER_AUTO_SINGLE
+ | NXF_SINGLE_IP
+#endif
+ | NXF_HIDE_NETIF;
+}
+
+
+/* network caps */
+
+#define NXC_TUN_CREATE 0x00000001
+
+#define NXC_RAW_ICMP 0x00000100
+
+
+/* address types */
+
+#define NXA_TYPE_IPV4 0x0001
+#define NXA_TYPE_IPV6 0x0002
+
+#define NXA_TYPE_NONE 0x0000
+#define NXA_TYPE_ANY 0x00FF
+
+#define NXA_TYPE_ADDR 0x0010
+#define NXA_TYPE_MASK 0x0020
+#define NXA_TYPE_RANGE 0x0040
+
+#define NXA_MASK_ALL (NXA_TYPE_ADDR | NXA_TYPE_MASK | NXA_TYPE_RANGE)
+
+#define NXA_MOD_BCAST 0x0100
+#define NXA_MOD_LBACK 0x0200
+
+#define NXA_LOOPBACK 0x1000
+
+#define NXA_MASK_BIND (NXA_MASK_ALL | NXA_MOD_BCAST | NXA_MOD_LBACK)
+#define NXA_MASK_SHOW (NXA_MASK_ALL | NXA_LOOPBACK)
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/rcupdate.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <asm/atomic.h>
+
+struct nx_addr_v4 {
+ struct nx_addr_v4 *next;
+ struct in_addr ip[2];
+ struct in_addr mask;
+ uint16_t type;
+ uint16_t flags;
+};
+
+struct nx_addr_v6 {
+ struct nx_addr_v6 *next;
+ struct in6_addr ip;
+ struct in6_addr mask;
+ uint32_t prefix;
+ uint16_t type;
+ uint16_t flags;
+};
+
+struct nx_info {
+ struct hlist_node nx_hlist; /* linked list of nxinfos */
+ nid_t nx_id; /* vnet id */
+ atomic_t nx_usecnt; /* usage count */
+ atomic_t nx_tasks; /* tasks count */
+ int nx_state; /* context state */
+
+ uint64_t nx_flags; /* network flag word */
+ uint64_t nx_ncaps; /* network capabilities */
+
+ struct in_addr v4_lback; /* Loopback address */
+ struct in_addr v4_bcast; /* Broadcast address */
+ struct nx_addr_v4 v4; /* First/Single ipv4 address */
+#ifdef CONFIG_IPV6
+ struct nx_addr_v6 v6; /* First/Single ipv6 address */
+#endif
+ char nx_name[65]; /* network context name */
+};
+
+
+/* status flags */
+
+#define NXS_HASHED 0x0001
+#define NXS_SHUTDOWN 0x0100
+#define NXS_RELEASED 0x8000
+
+extern struct nx_info *lookup_nx_info(int);
+
+extern int get_nid_list(int, unsigned int *, int);
+extern int nid_is_hashed(nid_t);
+
+extern int nx_migrate_task(struct task_struct *, struct nx_info *);
+
+extern long vs_net_change(struct nx_info *, unsigned int);
+
+struct sock;
+
+
+#define NX_IPV4(n) ((n)->v4.type != NXA_TYPE_NONE)
+#ifdef CONFIG_IPV6
+#define NX_IPV6(n) ((n)->v6.type != NXA_TYPE_NONE)
+#else
+#define NX_IPV6(n) (0)
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _VX_NETWORK_H */
--- a/include/linux/vserver/percpu.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/percpu.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,14 @@
+#ifndef _VX_PERCPU_H
+#define _VX_PERCPU_H
+
+#include "cvirt_def.h"
+#include "sched_def.h"
+
+struct _vx_percpu {
+ struct _vx_cvirt_pc cvirt;
+ struct _vx_sched_pc sched;
+};
+
+#define PERCPU_PERCTX (sizeof(struct _vx_percpu))
+
+#endif /* _VX_PERCPU_H */
--- a/include/linux/vserver/pid.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/pid.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,51 @@
+#ifndef _VSERVER_PID_H
+#define _VSERVER_PID_H
+
+/* pid faking stuff */
+
+#define vx_info_map_pid(v, p) \
+ __vx_info_map_pid((v), (p), __FUNC__, __FILE__, __LINE__)
+#define vx_info_map_tgid(v,p) vx_info_map_pid(v,p)
+#define vx_map_pid(p) vx_info_map_pid(current->vx_info, p)
+#define vx_map_tgid(p) vx_map_pid(p)
+
+static inline int __vx_info_map_pid(struct vx_info *vxi, int pid,
+ const char *func, const char *file, int line)
+{
+ if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+ vxfprintk(VXD_CBIT(cvirt, 2),
+ "vx_map_tgid: %p/%llx: %d -> %d",
+ vxi, (long long)vxi->vx_flags, pid,
+ (pid && pid == vxi->vx_initpid) ? 1 : pid,
+ func, file, line);
+ if (pid == 0)
+ return 0;
+ if (pid == vxi->vx_initpid)
+ return 1;
+ }
+ return pid;
+}
+
+#define vx_info_rmap_pid(v, p) \
+ __vx_info_rmap_pid((v), (p), __FUNC__, __FILE__, __LINE__)
+#define vx_rmap_pid(p) vx_info_rmap_pid(current->vx_info, p)
+#define vx_rmap_tgid(p) vx_rmap_pid(p)
+
+static inline int __vx_info_rmap_pid(struct vx_info *vxi, int pid,
+ const char *func, const char *file, int line)
+{
+ if (vx_info_flags(vxi, VXF_INFO_INIT, 0)) {
+ vxfprintk(VXD_CBIT(cvirt, 2),
+ "vx_rmap_tgid: %p/%llx: %d -> %d",
+ vxi, (long long)vxi->vx_flags, pid,
+ (pid == 1) ? vxi->vx_initpid : pid,
+ func, file, line);
+ if ((pid == 1) && vxi->vx_initpid)
+ return vxi->vx_initpid;
+ if (pid == vxi->vx_initpid)
+ return ~0U;
+ }
+ return pid;
+}
+
+#endif
--- a/include/linux/vserver/sched_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/sched_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,108 @@
+#ifndef _VX_SCHED_CMD_H
+#define _VX_SCHED_CMD_H
+
+
+/* sched vserver commands */
+
+#define VCMD_set_sched_v2 VC_CMD(SCHED, 1, 2)
+#define VCMD_set_sched_v3 VC_CMD(SCHED, 1, 3)
+#define VCMD_set_sched_v4 VC_CMD(SCHED, 1, 4)
+
+struct vcmd_set_sched_v2 {
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ uint64_t cpu_mask;
+};
+
+struct vcmd_set_sched_v3 {
+ uint32_t set_mask;
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ int32_t priority_bias;
+};
+
+struct vcmd_set_sched_v4 {
+ uint32_t set_mask;
+ int32_t fill_rate;
+ int32_t interval;
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ int32_t prio_bias;
+ int32_t cpu_id;
+ int32_t bucket_id;
+};
+
+#define VCMD_set_sched VC_CMD(SCHED, 1, 5)
+#define VCMD_get_sched VC_CMD(SCHED, 2, 5)
+
+struct vcmd_sched_v5 {
+ uint32_t mask;
+ int32_t cpu_id;
+ int32_t bucket_id;
+ int32_t fill_rate[2];
+ int32_t interval[2];
+ int32_t tokens;
+ int32_t tokens_min;
+ int32_t tokens_max;
+ int32_t prio_bias;
+};
+
+#define VXSM_FILL_RATE 0x0001
+#define VXSM_INTERVAL 0x0002
+#define VXSM_FILL_RATE2 0x0004
+#define VXSM_INTERVAL2 0x0008
+#define VXSM_TOKENS 0x0010
+#define VXSM_TOKENS_MIN 0x0020
+#define VXSM_TOKENS_MAX 0x0040
+#define VXSM_PRIO_BIAS 0x0100
+
+#define VXSM_IDLE_TIME 0x0200
+#define VXSM_FORCE 0x0400
+
+#define VXSM_V3_MASK 0x0173
+#define VXSM_SET_MASK 0x01FF
+
+#define VXSM_CPU_ID 0x1000
+#define VXSM_BUCKET_ID 0x2000
+
+#define VXSM_MSEC 0x4000
+
+#define SCHED_KEEP (-2) /* only for v2 */
+
+#ifdef __KERNEL__
+
+#include <linux/compiler.h>
+
+extern int vc_set_sched_v2(struct vx_info *, void __user *);
+extern int vc_set_sched_v3(struct vx_info *, void __user *);
+extern int vc_set_sched_v4(struct vx_info *, void __user *);
+extern int vc_set_sched(struct vx_info *, void __user *);
+extern int vc_get_sched(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+#define VCMD_sched_info VC_CMD(SCHED, 3, 0)
+
+struct vcmd_sched_info {
+ int32_t cpu_id;
+ int32_t bucket_id;
+ uint64_t user_msec;
+ uint64_t sys_msec;
+ uint64_t hold_msec;
+ uint32_t token_usec;
+ int32_t vavavoom;
+};
+
+#ifdef __KERNEL__
+
+extern int vc_sched_info(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_SCHED_CMD_H */
--- a/include/linux/vserver/sched_def.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/sched_def.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,68 @@
+#ifndef _VX_SCHED_DEF_H
+#define _VX_SCHED_DEF_H
+
+#include <linux/spinlock.h>
+#include <linux/jiffies.h>
+#include <linux/cpumask.h>
+#include <asm/atomic.h>
+#include <asm/param.h>
+
+
+/* context sub struct */
+
+struct _vx_sched {
+ spinlock_t tokens_lock; /* lock for token bucket */
+
+ int tokens; /* number of CPU tokens */
+ int fill_rate[2]; /* Fill rate: add X tokens... */
+ int interval[2]; /* Divisor: per Y jiffies */
+ int tokens_min; /* Limit: minimum for unhold */
+ int tokens_max; /* Limit: no more than N tokens */
+
+ int prio_bias; /* bias offset for priority */
+
+ unsigned update_mask; /* which features should be updated */
+ cpumask_t update; /* CPUs which should update */
+};
+
+struct _vx_sched_pc {
+ int tokens; /* number of CPU tokens */
+ int flags; /* bucket flags */
+
+ int fill_rate[2]; /* Fill rate: add X tokens... */
+ int interval[2]; /* Divisor: per Y jiffies */
+ int tokens_min; /* Limit: minimum for unhold */
+ int tokens_max; /* Limit: no more than N tokens */
+
+ int prio_bias; /* bias offset for priority */
+ int vavavoom; /* last calculated vavavoom */
+
+ unsigned long norm_time; /* last time accounted */
+ unsigned long idle_time; /* non linear time for fair sched */
+ unsigned long token_time; /* token time for accounting */
+ unsigned long onhold; /* jiffies when put on hold */
+
+ uint64_t user_ticks; /* token tick events */
+ uint64_t sys_ticks; /* token tick events */
+ uint64_t hold_ticks; /* token ticks paused */
+};
+
+
+#define VXSF_ONHOLD 0x0001
+#define VXSF_IDLE_TIME 0x0100
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+static inline void __dump_vx_sched(struct _vx_sched *sched)
+{
+ printk("\t_vx_sched:\n");
+ printk("\t tokens: %4d/%4d, %4d/%4d, %4d, %4d\n",
+ sched->fill_rate[0], sched->interval[0],
+ sched->fill_rate[1], sched->interval[1],
+ sched->tokens_min, sched->tokens_max);
+ printk("\t priority = %4d\n", sched->prio_bias);
+}
+
+#endif
+
+#endif /* _VX_SCHED_DEF_H */
--- a/include/linux/vserver/sched.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/sched.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,26 @@
+#ifndef _VX_SCHED_H
+#define _VX_SCHED_H
+
+
+#ifdef __KERNEL__
+
+struct timespec;
+
+void vx_vsi_uptime(struct timespec *, struct timespec *);
+
+
+struct vx_info;
+
+void vx_update_load(struct vx_info *);
+
+
+int vx_tokens_recalc(struct _vx_sched_pc *,
+ unsigned long *, unsigned long *, int [2]);
+
+void vx_update_sched_param(struct _vx_sched *sched,
+ struct _vx_sched_pc *sched_pc);
+
+#endif /* __KERNEL__ */
+#else /* _VX_SCHED_H */
+#warning duplicate inclusion
+#endif /* _VX_SCHED_H */
--- a/include/linux/vserver/signal_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/signal_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,43 @@
+#ifndef _VX_SIGNAL_CMD_H
+#define _VX_SIGNAL_CMD_H
+
+
+/* signalling vserver commands */
+
+#define VCMD_ctx_kill VC_CMD(PROCTRL, 1, 0)
+#define VCMD_wait_exit VC_CMD(EVENT, 99, 0)
+
+struct vcmd_ctx_kill_v0 {
+ int32_t pid;
+ int32_t sig;
+};
+
+struct vcmd_wait_exit_v0 {
+ int32_t reboot_cmd;
+ int32_t exit_code;
+};
+
+#ifdef __KERNEL__
+
+extern int vc_ctx_kill(struct vx_info *, void __user *);
+extern int vc_wait_exit(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+
+/* process alteration commands */
+
+#define VCMD_get_pflags VC_CMD(PROCALT, 5, 0)
+#define VCMD_set_pflags VC_CMD(PROCALT, 6, 0)
+
+struct vcmd_pflags_v0 {
+ uint32_t flagword;
+ uint32_t mask;
+};
+
+#ifdef __KERNEL__
+
+extern int vc_get_pflags(uint32_t pid, void __user *);
+extern int vc_set_pflags(uint32_t pid, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_SIGNAL_CMD_H */
--- a/include/linux/vserver/signal.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/signal.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,14 @@
+#ifndef _VX_SIGNAL_H
+#define _VX_SIGNAL_H
+
+
+#ifdef __KERNEL__
+
+struct vx_info;
+
+int vx_info_kill(struct vx_info *, int, int);
+
+#endif /* __KERNEL__ */
+#else /* _VX_SIGNAL_H */
+#warning duplicate inclusion
+#endif /* _VX_SIGNAL_H */
--- a/include/linux/vserver/space_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/space_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,26 @@
+#ifndef _VX_SPACE_CMD_H
+#define _VX_SPACE_CMD_H
+
+
+#define VCMD_enter_space_v0 VC_CMD(PROCALT, 1, 0)
+#define VCMD_enter_space VC_CMD(PROCALT, 1, 1)
+
+#define VCMD_set_space_v0 VC_CMD(PROCALT, 3, 0)
+#define VCMD_set_space VC_CMD(PROCALT, 3, 1)
+
+#define VCMD_get_space_mask VC_CMD(PROCALT, 4, 0)
+
+
+struct vcmd_space_mask {
+ uint64_t mask;
+};
+
+
+#ifdef __KERNEL__
+
+extern int vc_enter_space(struct vx_info *, void __user *);
+extern int vc_set_space(struct vx_info *, void __user *);
+extern int vc_get_space_mask(struct vx_info *, void __user *);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_SPACE_CMD_H */
--- a/include/linux/vserver/space.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/space.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,13 @@
+#ifndef _VX_SPACE_H
+#define _VX_SPACE_H
+
+
+#include <linux/types.h>
+
+struct vx_info;
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask);
+
+#else /* _VX_SPACE_H */
+#warning duplicate inclusion
+#endif /* _VX_SPACE_H */
--- a/include/linux/vserver/switch.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/switch.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,99 @@
+#ifndef _VX_SWITCH_H
+#define _VX_SWITCH_H
+
+#include <linux/types.h>
+
+
+#define VC_CATEGORY(c) (((c) >> 24) & 0x3F)
+#define VC_COMMAND(c) (((c) >> 16) & 0xFF)
+#define VC_VERSION(c) ((c) & 0xFFF)
+
+#define VC_CMD(c, i, v) ((((VC_CAT_ ## c) & 0x3F) << 24) \
+ | (((i) & 0xFF) << 16) | ((v) & 0xFFF))
+
+/*
+
+ Syscall Matrix V2.8
+
+ |VERSION|CREATE |MODIFY |MIGRATE|CONTROL|EXPERIM| |SPECIAL|SPECIAL|
+ |STATS |DESTROY|ALTER |CHANGE |LIMIT |TEST | | | |
+ |INFO |SETUP | |MOVE | | | | | |
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ SYSTEM |VERSION|VSETUP |VHOST | | | | |DEVICE | |
+ HOST | 00| 01| 02| 03| 04| 05| | 06| 07|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ CPU | |VPROC |PROCALT|PROCMIG|PROCTRL| | |SCHED. | |
+ PROCESS| 08| 09| 10| 11| 12| 13| | 14| 15|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ MEMORY | | | | |MEMCTRL| | |SWAP | |
+ | 16| 17| 18| 19| 20| 21| | 22| 23|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ NETWORK| |VNET |NETALT |NETMIG |NETCTL | | |SERIAL | |
+ | 24| 25| 26| 27| 28| 29| | 30| 31|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ DISK | | | |TAGMIG |DLIMIT | | |INODE | |
+ VFS | 32| 33| 34| 35| 36| 37| | 38| 39|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ OTHER |VSTAT | | | | | | |VINFO | |
+ | 40| 41| 42| 43| 44| 45| | 46| 47|
+ =======+=======+=======+=======+=======+=======+=======+ +=======+=======+
+ SPECIAL|EVENT | | | |FLAGS | | | | |
+ | 48| 49| 50| 51| 52| 53| | 54| 55|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+ SPECIAL|DEBUG | | | |RLIMIT |SYSCALL| | |COMPAT |
+ | 56| 57| 58| 59| 60|TEST 61| | 62| 63|
+ -------+-------+-------+-------+-------+-------+-------+ +-------+-------+
+
+*/
+
+#define VC_CAT_VERSION 0
+
+#define VC_CAT_VSETUP 1
+#define VC_CAT_VHOST 2
+
+#define VC_CAT_DEVICE 6
+
+#define VC_CAT_VPROC 9
+#define VC_CAT_PROCALT 10
+#define VC_CAT_PROCMIG 11
+#define VC_CAT_PROCTRL 12
+
+#define VC_CAT_SCHED 14
+#define VC_CAT_MEMCTRL 20
+
+#define VC_CAT_VNET 25
+#define VC_CAT_NETALT 26
+#define VC_CAT_NETMIG 27
+#define VC_CAT_NETCTRL 28
+
+#define VC_CAT_TAGMIG 35
+#define VC_CAT_DLIMIT 36
+#define VC_CAT_INODE 38
+
+#define VC_CAT_VSTAT 40
+#define VC_CAT_VINFO 46
+#define VC_CAT_EVENT 48
+
+#define VC_CAT_FLAGS 52
+#define VC_CAT_DEBUG 56
+#define VC_CAT_RLIMIT 60
+
+#define VC_CAT_SYSTEST 61
+#define VC_CAT_COMPAT 63
+
+/* query version */
+
+#define VCMD_get_version VC_CMD(VERSION, 0, 0)
+#define VCMD_get_vci VC_CMD(VERSION, 1, 0)
+
+
+#ifdef __KERNEL__
+
+#include <linux/errno.h>
+
+
+#else /* __KERNEL__ */
+#define __user
+#endif /* __KERNEL__ */
+
+#endif /* _VX_SWITCH_H */
--- a/include/linux/vserver/tag_cmd.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/tag_cmd.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,22 @@
+#ifndef _VX_TAG_CMD_H
+#define _VX_TAG_CMD_H
+
+
+/* vinfo commands */
+
+#define VCMD_task_tag VC_CMD(VINFO, 3, 0)
+
+#ifdef __KERNEL__
+extern int vc_task_tag(uint32_t);
+
+#endif /* __KERNEL__ */
+
+/* context commands */
+
+#define VCMD_tag_migrate VC_CMD(TAGMIG, 1, 0)
+
+#ifdef __KERNEL__
+extern int vc_tag_migrate(uint32_t);
+
+#endif /* __KERNEL__ */
+#endif /* _VX_TAG_CMD_H */
--- a/include/linux/vserver/tag.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vserver/tag.h 2008-04-21 13:53:47.000000000 -0400
@@ -0,0 +1,143 @@
+#ifndef _DX_TAG_H
+#define _DX_TAG_H
+
+#include <linux/types.h>
+
+
+#define DX_TAG(in) (IS_TAGGED(in))
+
+
+#ifdef CONFIG_DX_TAG_NFSD
+#define DX_TAG_NFSD 1
+#else
+#define DX_TAG_NFSD 0
+#endif
+
+
+#ifdef CONFIG_TAGGING_NONE
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) (0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_GID16
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0x0000FFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (((gid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) \
+ ((cond) ? (((gid) & 0xFFFF) | ((tag) << 16)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_ID24
+
+#define MAX_UID 0x00FFFFFF
+#define MAX_GID 0x00FFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? ((((uid) >> 16) & 0xFF00) | (((gid) >> 24) & 0xFF)) : 0)
+
+#define TAGINO_UID(cond, uid, tag) \
+ ((cond) ? (((uid) & 0xFFFFFF) | (((tag) & 0xFF00) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag) \
+ ((cond) ? (((gid) & 0xFFFFFF) | (((tag) & 0x00FF) << 24)) : (gid))
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_UID16
+
+#define MAX_UID 0x0000FFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (((uid) >> 16) & 0xFFFF) : 0)
+
+#define TAGINO_UID(cond, uid, tag) \
+ ((cond) ? (((uid) & 0xFFFF) | ((tag) << 16)) : (uid))
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifdef CONFIG_TAGGING_INTERN
+
+#define MAX_UID 0xFFFFFFFF
+#define MAX_GID 0xFFFFFFFF
+
+#define INOTAG_TAG(cond, uid, gid, tag) \
+ ((cond) ? (tag) : 0)
+
+#define TAGINO_UID(cond, uid, tag) (uid)
+#define TAGINO_GID(cond, gid, tag) (gid)
+
+#endif
+
+
+#ifndef CONFIG_TAGGING_NONE
+#define dx_current_fstag(sb) \
+ ((sb)->s_flags & MS_TAGGED ? dx_current_tag() : 0)
+#else
+#define dx_current_fstag(sb) (0)
+#endif
+
+#ifndef CONFIG_TAGGING_INTERN
+#define TAGINO_TAG(cond, tag) (0)
+#else
+#define TAGINO_TAG(cond, tag) ((cond) ? (tag) : 0)
+#endif
+
+#define INOTAG_UID(cond, uid, gid) \
+ ((cond) ? ((uid) & MAX_UID) : (uid))
+#define INOTAG_GID(cond, uid, gid) \
+ ((cond) ? ((gid) & MAX_GID) : (gid))
+
+
+static inline uid_t dx_map_uid(uid_t uid)
+{
+ if ((uid > MAX_UID) && (uid != -1))
+ uid = -2;
+ return (uid & MAX_UID);
+}
+
+static inline gid_t dx_map_gid(gid_t gid)
+{
+ if ((gid > MAX_GID) && (gid != -1))
+ gid = -2;
+ return (gid & MAX_GID);
+}
+
+struct peer_tag {
+ int32_t xid;
+ int32_t nid;
+};
+
+#define dx_notagcheck(nd) ((nd) && (nd)->path.mnt && \
+ ((nd)->path.mnt->mnt_flags & MNT_NOTAGCHECK))
+
+int dx_parse_tag(char *string, tag_t *tag, int remove);
+
+#ifdef CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode);
+
+#define dx_propagate_tag(n, i) __dx_propagate_tag(n, i)
+
+#else
+#define dx_propagate_tag(n, i) do { } while (0)
+#endif
+
+#endif /* _DX_TAG_H */
--- a/include/linux/vs_inet6.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_inet6.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,229 @@
+#ifndef _VS_INET6_H
+#define _VS_INET6_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#include <net/ipv6.h>
+
+#define NXAV6(a) NIP6((a)->ip), NIP6((a)->mask), (a)->prefix, (a)->type
+#define NXAV6_FMT "[" NIP6_FMT "/" NIP6_FMT "/%d:%04x]"
+
+
+#ifdef CONFIG_IPV6
+
+static inline
+int v6_addr_match(struct nx_addr_v6 *nxa,
+ const struct in6_addr *addr, uint16_t mask)
+{
+ switch (nxa->type & mask) {
+ case NXA_TYPE_MASK:
+ return ipv6_masked_addr_cmp(&nxa->ip, &nxa->mask, addr);
+ case NXA_TYPE_ADDR:
+ return ipv6_addr_equal(&nxa->ip, addr);
+ case NXA_TYPE_ANY:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline
+int v6_addr_in_nx_info(struct nx_info *nxi,
+ const struct in6_addr *addr, uint16_t mask)
+{
+ struct nx_addr_v6 *nxa;
+
+ if (!nxi)
+ return 1;
+ for (nxa = &nxi->v6; nxa; nxa = nxa->next)
+ if (v6_addr_match(nxa, addr, mask))
+ return 1;
+ return 0;
+}
+
+static inline
+int v6_nx_addr_match(struct nx_addr_v6 *nxa, struct nx_addr_v6 *addr, uint16_t mask)
+{
+ /* FIXME: needs full range checks */
+ return v6_addr_match(nxa, &addr->ip, mask);
+}
+
+static inline
+int v6_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v6 *nxa, uint16_t mask)
+{
+ struct nx_addr_v6 *ptr;
+
+ for (ptr = &nxi->v6; ptr; ptr = ptr->next)
+ if (v6_nx_addr_match(ptr, nxa, mask))
+ return 1;
+ return 0;
+}
+
+
+/*
+ * Check if a given address matches for a socket
+ *
+ * nxi: the socket's nx_info if any
+ * addr: to be verified address
+ */
+static inline
+int v6_sock_addr_match (
+ struct nx_info *nxi,
+ struct inet_sock *inet,
+ struct in6_addr *addr)
+{
+ struct sock *sk = &inet->sk;
+ struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+ if (!ipv6_addr_any(addr) &&
+ ipv6_addr_equal(saddr, addr))
+ return 1;
+ if (ipv6_addr_any(saddr))
+ return v6_addr_in_nx_info(nxi, addr, -1);
+ return 0;
+}
+
+/*
+ * check if address is covered by socket
+ *
+ * sk: the socket to check against
+ * addr: the address in question (must be != 0)
+ */
+
+static inline
+int __v6_addr_match_socket(const struct sock *sk, struct nx_addr_v6 *nxa)
+{
+ struct nx_info *nxi = sk->sk_nx_info;
+ struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+ vxdprintk(VXD_CBIT(net, 5),
+ "__v6_addr_in_socket(%p," NXAV6_FMT ") %p:" NIP6_FMT " %p;%lx",
+ sk, NXAV6(nxa), nxi, NIP6(*saddr), sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+
+ if (!ipv6_addr_any(saddr)) { /* direct address match */
+ return v6_addr_match(nxa, saddr, -1);
+ } else if (nxi) { /* match against nx_info */
+ return v6_nx_addr_in_nx_info(nxi, nxa, -1);
+ } else { /* unrestricted any socket */
+ return 1;
+ }
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v6_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v6_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+
+static inline
+int v6_ifa_in_nx_info(struct inet6_ifaddr *ifa, struct nx_info *nxi)
+{
+ if (!nxi)
+ return 1;
+ if (!ifa)
+ return 0;
+ return v6_addr_in_nx_info(nxi, &ifa->addr, -1);
+}
+
+static inline
+int nx_v6_ifa_visible(struct nx_info *nxi, struct inet6_ifaddr *ifa)
+{
+ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+ return 1;
+ if (v6_ifa_in_nx_info(ifa, nxi))
+ return 1;
+ return 0;
+}
+
+
+struct nx_v6_sock_addr {
+ struct in6_addr saddr; /* Address used for validation */
+ struct in6_addr baddr; /* Address used for socket bind */
+};
+
+static inline
+int v6_map_sock_addr(struct inet_sock *inet, struct sockaddr_in6 *addr,
+ struct nx_v6_sock_addr *nsa)
+{
+ // struct sock *sk = &inet->sk;
+ // struct nx_info *nxi = sk->sk_nx_info;
+ struct in6_addr saddr = addr->sin6_addr;
+ struct in6_addr baddr = saddr;
+
+ nsa->saddr = saddr;
+ nsa->baddr = baddr;
+ return 0;
+}
+
+static inline
+void v6_set_sock_addr(struct inet_sock *inet, struct nx_v6_sock_addr *nsa)
+{
+ // struct sock *sk = &inet->sk;
+ // struct in6_addr *saddr = inet6_rcv_saddr(sk);
+
+ // *saddr = nsa->baddr;
+ // inet->saddr = nsa->baddr;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+ if (!nxi)
+ return 1;
+ if (NX_IPV6(nxi))
+ return 1;
+ return 0;
+}
+
+#else /* CONFIG_IPV6 */
+
+static inline
+int nx_v6_dev_visible(struct nx_info *n, struct net_device *d)
+{
+ return 1;
+}
+
+
+static inline
+int nx_v6_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+ return 1;
+}
+
+static inline
+int v6_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+ return 1;
+}
+
+static inline
+int nx_info_has_v6(struct nx_info *nxi)
+{
+ return 0;
+}
+
+#endif /* CONFIG_IPV6 */
+
+#define current_nx_info_has_v6() \
+ nx_info_has_v6(current_nx_info())
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_inet.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_inet.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,342 @@
+#ifndef _VS_INET_H
+#define _VS_INET_H
+
+#include "vserver/base.h"
+#include "vserver/network.h"
+#include "vserver/debug.h"
+
+#define IPI_LOOPBACK htonl(INADDR_LOOPBACK)
+
+#define NXAV4(a) NIPQUAD((a)->ip[0]), NIPQUAD((a)->ip[1]), \
+ NIPQUAD((a)->mask), (a)->type
+#define NXAV4_FMT "[" NIPQUAD_FMT "-" NIPQUAD_FMT "/" NIPQUAD_FMT ":%04x]"
+
+
+static inline
+int v4_addr_match(struct nx_addr_v4 *nxa, __be32 addr, uint16_t tmask)
+{
+ __be32 ip = nxa->ip[0].s_addr;
+ __be32 mask = nxa->mask.s_addr;
+ __be32 bcast = ip | ~mask;
+ int ret = 0;
+
+ switch (nxa->type & tmask) {
+ case NXA_TYPE_MASK:
+ ret = (ip == (addr & mask));
+ break;
+ case NXA_TYPE_ADDR:
+ ret = 3;
+ if (addr == ip)
+ break;
+ /* fall through to broadcast */
+ case NXA_MOD_BCAST:
+ ret = ((tmask & NXA_MOD_BCAST) && (addr == bcast));
+ break;
+ case NXA_TYPE_RANGE:
+ ret = ((nxa->ip[0].s_addr <= addr) &&
+ (nxa->ip[1].s_addr > addr));
+ break;
+ case NXA_TYPE_ANY:
+ ret = 2;
+ break;
+ }
+
+ vxdprintk(VXD_CBIT(net, 0),
+ "v4_addr_match(%p" NXAV4_FMT "," NIPQUAD_FMT ",%04x) = %d",
+ nxa, NXAV4(nxa), NIPQUAD(addr), tmask, ret);
+ return ret;
+}
+
+static inline
+int v4_addr_in_nx_info(struct nx_info *nxi, __be32 addr, uint16_t tmask)
+{
+ struct nx_addr_v4 *nxa;
+ int ret = 1;
+
+ if (!nxi)
+ goto out;
+
+ ret = 2;
+ /* allow 127.0.0.1 when remapping lback */
+ if ((tmask & NXA_LOOPBACK) &&
+ (addr == IPI_LOOPBACK) &&
+ nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+ goto out;
+ ret = 3;
+ /* check for lback address */
+ if ((tmask & NXA_MOD_LBACK) &&
+ (nxi->v4_lback.s_addr == addr))
+ goto out;
+ ret = 4;
+ /* check for broadcast address */
+ if ((tmask & NXA_MOD_BCAST) &&
+ (nxi->v4_bcast.s_addr == addr))
+ goto out;
+ ret = 5;
+ /* check for v4 addresses */
+ for (nxa = &nxi->v4; nxa; nxa = nxa->next)
+ if (v4_addr_match(nxa, addr, tmask))
+ goto out;
+ ret = 0;
+out:
+ vxdprintk(VXD_CBIT(net, 0),
+ "v4_addr_in_nx_info(%p[#%u]," NIPQUAD_FMT ",%04x) = %d",
+ nxi, nxi ? nxi->nx_id : 0, NIPQUAD(addr), tmask, ret);
+ return ret;
+}
+
+static inline
+int v4_nx_addr_match(struct nx_addr_v4 *nxa, struct nx_addr_v4 *addr, uint16_t mask)
+{
+ /* FIXME: needs full range checks */
+ return v4_addr_match(nxa, addr->ip[0].s_addr, mask);
+}
+
+static inline
+int v4_nx_addr_in_nx_info(struct nx_info *nxi, struct nx_addr_v4 *nxa, uint16_t mask)
+{
+ struct nx_addr_v4 *ptr;
+
+ for (ptr = &nxi->v4; ptr; ptr = ptr->next)
+ if (v4_nx_addr_match(ptr, nxa, mask))
+ return 1;
+ return 0;
+}
+
+#include <net/inet_sock.h>
+
+/*
+ * Check if a given address matches for a socket
+ *
+ * nxi: the socket's nx_info if any
+ * addr: to be verified address
+ */
+static inline
+int v4_sock_addr_match (
+ struct nx_info *nxi,
+ struct inet_sock *inet,
+ __be32 addr)
+{
+ __be32 saddr = inet->rcv_saddr;
+ __be32 bcast = nxi ? nxi->v4_bcast.s_addr : INADDR_BROADCAST;
+
+ if (addr && (saddr == addr || bcast == addr))
+ return 1;
+ if (!saddr)
+ return v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND);
+ return 0;
+}
+
+
+/* inet related checks and helpers */
+
+
+struct in_ifaddr;
+struct net_device;
+struct sock;
+
+#ifdef CONFIG_INET
+
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <net/inet_sock.h>
+#include <net/inet_timewait_sock.h>
+
+
+int dev_in_nx_info(struct net_device *, struct nx_info *);
+int v4_dev_in_nx_info(struct net_device *, struct nx_info *);
+int nx_v4_addr_conflict(struct nx_info *, struct nx_info *);
+
+
+/*
+ * check if address is covered by socket
+ *
+ * sk: the socket to check against
+ * addr: the address in question (must be != 0)
+ */
+
+static inline
+int __v4_addr_match_socket(const struct sock *sk, struct nx_addr_v4 *nxa)
+{
+ struct nx_info *nxi = sk->sk_nx_info;
+ __be32 saddr = inet_rcv_saddr(sk);
+
+ vxdprintk(VXD_CBIT(net, 5),
+ "__v4_addr_in_socket(%p," NXAV4_FMT ") %p:" NIPQUAD_FMT " %p;%lx",
+ sk, NXAV4(nxa), nxi, NIPQUAD(saddr), sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+
+ if (saddr) { /* direct address match */
+ return v4_addr_match(nxa, saddr, -1);
+ } else if (nxi) { /* match against nx_info */
+ return v4_nx_addr_in_nx_info(nxi, nxa, -1);
+ } else { /* unrestricted any socket */
+ return 1;
+ }
+}
+
+
+
+static inline
+int nx_dev_visible(struct nx_info *nxi, struct net_device *dev)
+{
+	vxdprintk(VXD_CBIT(net, 1), "nx_dev_visible(%p[#%u],%p »%s«) %d",
+ nxi, nxi ? nxi->nx_id : 0, dev, dev->name,
+ nxi ? dev_in_nx_info(dev, nxi) : 0);
+
+ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+ return 1;
+ if (dev_in_nx_info(dev, nxi))
+ return 1;
+ return 0;
+}
+
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *ifa, struct nx_info *nxi)
+{
+ if (!nxi)
+ return 1;
+ if (!ifa)
+ return 0;
+ return v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW);
+}
+
+static inline
+int nx_v4_ifa_visible(struct nx_info *nxi, struct in_ifaddr *ifa)
+{
+ vxdprintk(VXD_CBIT(net, 1), "nx_v4_ifa_visible(%p[#%u],%p) %d",
+ nxi, nxi ? nxi->nx_id : 0, ifa,
+ nxi ? v4_ifa_in_nx_info(ifa, nxi) : 0);
+
+ if (!nx_info_flags(nxi, NXF_HIDE_NETIF, 0))
+ return 1;
+ if (v4_ifa_in_nx_info(ifa, nxi))
+ return 1;
+ return 0;
+}
+
+
+struct nx_v4_sock_addr {
+ __be32 saddr; /* Address used for validation */
+ __be32 baddr; /* Address used for socket bind */
+};
+
+static inline
+int v4_map_sock_addr(struct inet_sock *inet, struct sockaddr_in *addr,
+ struct nx_v4_sock_addr *nsa)
+{
+ struct sock *sk = &inet->sk;
+ struct nx_info *nxi = sk->sk_nx_info;
+ __be32 saddr = addr->sin_addr.s_addr;
+ __be32 baddr = saddr;
+
+ vxdprintk(VXD_CBIT(net, 3),
+ "inet_bind(%p)* %p,%p;%lx " NIPQUAD_FMT,
+ sk, sk->sk_nx_info, sk->sk_socket,
+ (sk->sk_socket ? sk->sk_socket->flags : 0),
+ NIPQUAD(saddr));
+
+ if (nxi) {
+ if (saddr == INADDR_ANY) {
+ if (nx_info_flags(nxi, NXF_SINGLE_IP, 0))
+ baddr = nxi->v4.ip[0].s_addr;
+ } else if (saddr == IPI_LOOPBACK) {
+ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+ baddr = nxi->v4_lback.s_addr;
+ } else { /* normal address bind */
+ if (!v4_addr_in_nx_info(nxi, saddr, NXA_MASK_BIND))
+ return -EADDRNOTAVAIL;
+ }
+ }
+
+ vxdprintk(VXD_CBIT(net, 3),
+ "inet_bind(%p) " NIPQUAD_FMT ", " NIPQUAD_FMT,
+ sk, NIPQUAD(saddr), NIPQUAD(baddr));
+
+ nsa->saddr = saddr;
+ nsa->baddr = baddr;
+ return 0;
+}
+
+static inline
+void v4_set_sock_addr(struct inet_sock *inet, struct nx_v4_sock_addr *nsa)
+{
+ inet->saddr = nsa->baddr;
+ inet->rcv_saddr = nsa->baddr;
+}
+
+
+/*
+ * helper to simplify inet_lookup_listener
+ *
+ * nxi: the socket's nx_info if any
+ * addr: to be verified address
+ * saddr: socket address
+ */
+static inline int v4_inet_addr_match (
+ struct nx_info *nxi,
+ __be32 addr,
+ __be32 saddr)
+{
+ if (addr && (saddr == addr))
+ return 1;
+ if (!saddr)
+ return nxi ? v4_addr_in_nx_info(nxi, addr, NXA_MASK_BIND) : 1;
+ return 0;
+}
+
+static inline __be32 nx_map_sock_lback(struct nx_info *nxi, __be32 addr)
+{
+ if (nx_info_flags(nxi, NXF_HIDE_LBACK, 0) &&
+ (addr == nxi->v4_lback.s_addr))
+ return IPI_LOOPBACK;
+ return addr;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+ if (!nxi)
+ return 1;
+ if (NX_IPV4(nxi))
+ return 1;
+ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0))
+ return 1;
+ return 0;
+}
+
+#else /* CONFIG_INET */
+
+static inline
+int nx_dev_visible(struct nx_info *n, struct net_device *d)
+{
+ return 1;
+}
+
+static inline
+int nx_v4_addr_conflict(struct nx_info *n, uint32_t a, const struct sock *s)
+{
+ return 1;
+}
+
+static inline
+int v4_ifa_in_nx_info(struct in_ifaddr *a, struct nx_info *n)
+{
+ return 1;
+}
+
+static inline
+int nx_info_has_v4(struct nx_info *nxi)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INET */
+
+#define current_nx_info_has_v4() \
+ nx_info_has_v4(current_nx_info())
+
+#else
+// #warning duplicate inclusion
+#endif
--- a/include/linux/vs_limit.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_limit.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,140 @@
+#ifndef _VS_LIMIT_H
+#define _VS_LIMIT_H
+
+#include "vserver/limit.h"
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/context.h"
+#include "vserver/limit_int.h"
+
+
+#define vx_acc_cres(v, d, p, r) \
+ __vx_acc_cres(v, r, d, p, __FILE__, __LINE__)
+
+#define vx_acc_cres_cond(x, d, p, r) \
+ __vx_acc_cres(((x) == vx_current_xid()) ? current->vx_info : 0, \
+ r, d, p, __FILE__, __LINE__)
+
+
+#define vx_add_cres(v, a, p, r) \
+ __vx_add_cres(v, r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres(v, a, p, r) vx_add_cres(v, -(a), p, r)
+
+#define vx_add_cres_cond(x, a, p, r) \
+ __vx_add_cres(((x) == vx_current_xid()) ? current->vx_info : 0, \
+ r, a, p, __FILE__, __LINE__)
+#define vx_sub_cres_cond(x, a, p, r) vx_add_cres_cond(x, -(a), p, r)
+
+
+/* process and file limits */
+
+#define vx_nproc_inc(p) \
+ vx_acc_cres((p)->vx_info, 1, p, RLIMIT_NPROC)
+
+#define vx_nproc_dec(p) \
+ vx_acc_cres((p)->vx_info,-1, p, RLIMIT_NPROC)
+
+#define vx_files_inc(f) \
+ vx_acc_cres_cond((f)->f_xid, 1, f, RLIMIT_NOFILE)
+
+#define vx_files_dec(f) \
+ vx_acc_cres_cond((f)->f_xid,-1, f, RLIMIT_NOFILE)
+
+#define vx_locks_inc(l) \
+ vx_acc_cres_cond((l)->fl_xid, 1, l, RLIMIT_LOCKS)
+
+#define vx_locks_dec(l) \
+ vx_acc_cres_cond((l)->fl_xid,-1, l, RLIMIT_LOCKS)
+
+#define vx_openfd_inc(f) \
+ vx_acc_cres(current->vx_info, 1, (void *)(long)(f), VLIMIT_OPENFD)
+
+#define vx_openfd_dec(f) \
+ vx_acc_cres(current->vx_info,-1, (void *)(long)(f), VLIMIT_OPENFD)
+
+
+#define vx_cres_avail(v, n, r) \
+ __vx_cres_avail(v, r, n, __FILE__, __LINE__)
+
+
+#define vx_nproc_avail(n) \
+ vx_cres_avail(current->vx_info, n, RLIMIT_NPROC)
+
+#define vx_files_avail(n) \
+ vx_cres_avail(current->vx_info, n, RLIMIT_NOFILE)
+
+#define vx_locks_avail(n) \
+ vx_cres_avail(current->vx_info, n, RLIMIT_LOCKS)
+
+#define vx_openfd_avail(n) \
+ vx_cres_avail(current->vx_info, n, VLIMIT_OPENFD)
+
+
+/* dentry limits */
+
+#define vx_dentry_inc(d) do { \
+ if (atomic_read(&d->d_count) == 1) \
+ vx_acc_cres(current->vx_info, 1, d, VLIMIT_DENTRY); \
+ } while (0)
+
+#define vx_dentry_dec(d) do { \
+ if (atomic_read(&d->d_count) == 0) \
+ vx_acc_cres(current->vx_info,-1, d, VLIMIT_DENTRY); \
+ } while (0)
+
+#define vx_dentry_avail(n) \
+ vx_cres_avail(current->vx_info, n, VLIMIT_DENTRY)
+
+
+/* socket limits */
+
+#define vx_sock_inc(s) \
+ vx_acc_cres((s)->sk_vx_info, 1, s, VLIMIT_NSOCK)
+
+#define vx_sock_dec(s) \
+ vx_acc_cres((s)->sk_vx_info,-1, s, VLIMIT_NSOCK)
+
+#define vx_sock_avail(n) \
+ vx_cres_avail(current->vx_info, n, VLIMIT_NSOCK)
+
+
+/* ipc resource limits */
+
+#define vx_ipcmsg_add(v, u, a) \
+ vx_add_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_sub(v, u, a) \
+ vx_sub_cres(v, a, u, RLIMIT_MSGQUEUE)
+
+#define vx_ipcmsg_avail(v, a) \
+ vx_cres_avail(v, a, RLIMIT_MSGQUEUE)
+
+
+#define vx_ipcshm_add(v, k, a) \
+ vx_add_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_sub(v, k, a) \
+ vx_sub_cres(v, a, (void *)(long)(k), VLIMIT_SHMEM)
+
+#define vx_ipcshm_avail(v, a) \
+ vx_cres_avail(v, a, VLIMIT_SHMEM)
+
+
+#define vx_semary_inc(a) \
+ vx_acc_cres(current->vx_info, 1, a, VLIMIT_SEMARY)
+
+#define vx_semary_dec(a) \
+ vx_acc_cres(current->vx_info, -1, a, VLIMIT_SEMARY)
+
+
+#define vx_nsems_add(a,n) \
+ vx_add_cres(current->vx_info, n, a, VLIMIT_NSEMS)
+
+#define vx_nsems_sub(a,n) \
+ vx_sub_cres(current->vx_info, n, a, VLIMIT_NSEMS)
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_memory.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_memory.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,159 @@
+#ifndef _VS_MEMORY_H
+#define _VS_MEMORY_H
+
+#include "vserver/limit.h"
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/context.h"
+#include "vserver/limit_int.h"
+
+
+#define __acc_add_long(a, v) (*(v) += (a))
+#define __acc_inc_long(v) (++*(v))
+#define __acc_dec_long(v) (--*(v))
+
+#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
+#define __acc_add_atomic(a, v) atomic_long_add(a, v)
+#define __acc_inc_atomic(v) atomic_long_inc(v)
+#define __acc_dec_atomic(v) atomic_long_dec(v)
+#else /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+#define __acc_add_atomic(a, v) __acc_add_long(a, v)
+#define __acc_inc_atomic(v) __acc_inc_long(v)
+#define __acc_dec_atomic(v) __acc_dec_long(v)
+#endif /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
+
+
+#define vx_acc_page(m, d, v, r) do { \
+ if ((d) > 0) \
+ __acc_inc_long(&(m)->v); \
+ else \
+ __acc_dec_long(&(m)->v); \
+ __vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__); \
+} while (0)
+
+#define vx_acc_page_atomic(m, d, v, r) do { \
+ if ((d) > 0) \
+ __acc_inc_atomic(&(m)->v); \
+ else \
+ __acc_dec_atomic(&(m)->v); \
+ __vx_acc_cres(m->mm_vx_info, r, d, m, __FILE__, __LINE__); \
+} while (0)
+
+
+#define vx_acc_pages(m, p, v, r) do { \
+ unsigned long __p = (p); \
+ __acc_add_long(__p, &(m)->v); \
+ __vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__); \
+} while (0)
+
+#define vx_acc_pages_atomic(m, p, v, r) do { \
+ unsigned long __p = (p); \
+ __acc_add_atomic(__p, &(m)->v); \
+ __vx_add_cres(m->mm_vx_info, r, __p, m, __FILE__, __LINE__); \
+} while (0)
+
+
+
+#define vx_acc_vmpage(m, d) \
+ vx_acc_page(m, d, total_vm, RLIMIT_AS)
+#define vx_acc_vmlpage(m, d) \
+ vx_acc_page(m, d, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_file_rsspage(m, d) \
+ vx_acc_page_atomic(m, d, _file_rss, VLIMIT_MAPPED)
+#define vx_acc_anon_rsspage(m, d) \
+ vx_acc_page_atomic(m, d, _anon_rss, VLIMIT_ANON)
+
+#define vx_acc_vmpages(m, p) \
+ vx_acc_pages(m, p, total_vm, RLIMIT_AS)
+#define vx_acc_vmlpages(m, p) \
+ vx_acc_pages(m, p, locked_vm, RLIMIT_MEMLOCK)
+#define vx_acc_file_rsspages(m, p) \
+ vx_acc_pages_atomic(m, p, _file_rss, VLIMIT_MAPPED)
+#define vx_acc_anon_rsspages(m, p) \
+ vx_acc_pages_atomic(m, p, _anon_rss, VLIMIT_ANON)
+
+#define vx_pages_add(s, r, p) __vx_add_cres(s, r, p, 0, __FILE__, __LINE__)
+#define vx_pages_sub(s, r, p) vx_pages_add(s, r, -(p))
+
+#define vx_vmpages_inc(m) vx_acc_vmpage(m, 1)
+#define vx_vmpages_dec(m) vx_acc_vmpage(m, -1)
+#define vx_vmpages_add(m, p) vx_acc_vmpages(m, p)
+#define vx_vmpages_sub(m, p) vx_acc_vmpages(m, -(p))
+
+#define vx_vmlocked_inc(m) vx_acc_vmlpage(m, 1)
+#define vx_vmlocked_dec(m) vx_acc_vmlpage(m, -1)
+#define vx_vmlocked_add(m, p) vx_acc_vmlpages(m, p)
+#define vx_vmlocked_sub(m, p) vx_acc_vmlpages(m, -(p))
+
+#define vx_file_rsspages_inc(m) vx_acc_file_rsspage(m, 1)
+#define vx_file_rsspages_dec(m) vx_acc_file_rsspage(m, -1)
+#define vx_file_rsspages_add(m, p) vx_acc_file_rsspages(m, p)
+#define vx_file_rsspages_sub(m, p) vx_acc_file_rsspages(m, -(p))
+
+#define vx_anon_rsspages_inc(m) vx_acc_anon_rsspage(m, 1)
+#define vx_anon_rsspages_dec(m) vx_acc_anon_rsspage(m, -1)
+#define vx_anon_rsspages_add(m, p) vx_acc_anon_rsspages(m, p)
+#define vx_anon_rsspages_sub(m, p) vx_acc_anon_rsspages(m, -(p))
+
+
+#define vx_pages_avail(m, p, r) \
+ __vx_cres_avail((m)->mm_vx_info, r, p, __FILE__, __LINE__)
+
+#define vx_vmpages_avail(m, p) vx_pages_avail(m, p, RLIMIT_AS)
+#define vx_vmlocked_avail(m, p) vx_pages_avail(m, p, RLIMIT_MEMLOCK)
+#define vx_anon_avail(m, p) vx_pages_avail(m, p, VLIMIT_ANON)
+#define vx_mapped_avail(m, p) vx_pages_avail(m, p, VLIMIT_MAPPED)
+
+#define vx_rss_avail(m, p) \
+ __vx_cres_array_avail((m)->mm_vx_info, VLA_RSS, p, __FILE__, __LINE__)
+
+
+enum {
+ VXPT_UNKNOWN = 0,
+ VXPT_ANON,
+ VXPT_NONE,
+ VXPT_FILE,
+ VXPT_SWAP,
+ VXPT_WRITE
+};
+
+#if 0
+#define vx_page_fault(mm, vma, type, ret)
+#else
+
+static inline
+void __vx_page_fault(struct mm_struct *mm,
+ struct vm_area_struct *vma, int type, int ret)
+{
+ struct vx_info *vxi = mm->mm_vx_info;
+ int what;
+/*
+ static char *page_type[6] =
+ { "UNKNOWN", "ANON", "NONE", "FILE", "SWAP", "WRITE" };
+ static char *page_what[4] =
+ { "FAULT_OOM", "FAULT_SIGBUS", "FAULT_MINOR", "FAULT_MAJOR" };
+*/
+
+ if (!vxi)
+ return;
+
+ what = (ret & 0x3);
+
+/* printk("[%d] page[%d][%d] %2x %s %s\n", vxi->vx_id,
+ type, what, ret, page_type[type], page_what[what]);
+*/
+ if (ret & VM_FAULT_WRITE)
+ what |= 0x4;
+ atomic_inc(&vxi->cacct.page[type][what]);
+}
+
+#define vx_page_fault(mm, vma, type, ret) __vx_page_fault(mm, vma, type, ret)
+#endif
+
+
+extern unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm);
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_network.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_network.h 2008-04-29 18:42:49.000000000 -0400
@@ -0,0 +1,169 @@
+#ifndef _NX_VS_NETWORK_H
+#define _NX_VS_NETWORK_H
+
+#include "vserver/context.h"
+#include "vserver/network.h"
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/debug.h"
+
+#include <linux/sched.h>
+
+
+#define get_nx_info(i) __get_nx_info(i, __FILE__, __LINE__)
+
+static inline struct nx_info *__get_nx_info(struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ if (!nxi)
+ return NULL;
+
+ vxlprintk(VXD_CBIT(nid, 2), "get_nx_info(%p[#%d.%d])",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+ _file, _line);
+
+ atomic_inc(&nxi->nx_usecnt);
+ return nxi;
+}
+
+
+extern void free_nx_info(struct nx_info *);
+
+#define put_nx_info(i) __put_nx_info(i, __FILE__, __LINE__)
+
+static inline void __put_nx_info(struct nx_info *nxi, const char *_file, int _line)
+{
+ if (!nxi)
+ return;
+
+ vxlprintk(VXD_CBIT(nid, 2), "put_nx_info(%p[#%d.%d])",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+ _file, _line);
+
+ if (atomic_dec_and_test(&nxi->nx_usecnt))
+ free_nx_info(nxi);
+}
+
+
+#define init_nx_info(p, i) __init_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __init_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ if (nxi) {
+ vxlprintk(VXD_CBIT(nid, 3),
+ "init_nx_info(%p[#%d.%d])",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+ _file, _line);
+
+ atomic_inc(&nxi->nx_usecnt);
+ }
+ *nxp = nxi;
+}
+
+
+#define set_nx_info(p, i) __set_nx_info(p, i, __FILE__, __LINE__)
+
+static inline void __set_nx_info(struct nx_info **nxp, struct nx_info *nxi,
+ const char *_file, int _line)
+{
+ struct nx_info *nxo;
+
+ if (!nxi)
+ return;
+
+ vxlprintk(VXD_CBIT(nid, 3), "set_nx_info(%p[#%d.%d])",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+ _file, _line);
+
+ atomic_inc(&nxi->nx_usecnt);
+ nxo = xchg(nxp, nxi);
+ BUG_ON(nxo);
+}
+
+#define clr_nx_info(p) __clr_nx_info(p, __FILE__, __LINE__)
+
+static inline void __clr_nx_info(struct nx_info **nxp,
+ const char *_file, int _line)
+{
+ struct nx_info *nxo;
+
+ nxo = xchg(nxp, NULL);
+ if (!nxo)
+ return;
+
+ vxlprintk(VXD_CBIT(nid, 3), "clr_nx_info(%p[#%d.%d])",
+ nxo, nxo ? nxo->nx_id : 0,
+ nxo ? atomic_read(&nxo->nx_usecnt) : 0,
+ _file, _line);
+
+ if (atomic_dec_and_test(&nxo->nx_usecnt))
+ free_nx_info(nxo);
+}
+
+
+#define claim_nx_info(v, p) __claim_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __claim_nx_info(struct nx_info *nxi,
+ struct task_struct *task, const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(nid, 3), "claim_nx_info(%p[#%d.%d.%d]) %p",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi?atomic_read(&nxi->nx_usecnt):0,
+ nxi?atomic_read(&nxi->nx_tasks):0,
+ task, _file, _line);
+
+ atomic_inc(&nxi->nx_tasks);
+}
+
+
+extern void unhash_nx_info(struct nx_info *);
+
+#define release_nx_info(v, p) __release_nx_info(v, p, __FILE__, __LINE__)
+
+static inline void __release_nx_info(struct nx_info *nxi,
+ struct task_struct *task, const char *_file, int _line)
+{
+ vxlprintk(VXD_CBIT(nid, 3), "release_nx_info(%p[#%d.%d.%d]) %p",
+ nxi, nxi ? nxi->nx_id : 0,
+ nxi ? atomic_read(&nxi->nx_usecnt) : 0,
+ nxi ? atomic_read(&nxi->nx_tasks) : 0,
+ task, _file, _line);
+
+ might_sleep();
+
+ if (atomic_dec_and_test(&nxi->nx_tasks))
+ unhash_nx_info(nxi);
+}
+
+
+#define task_get_nx_info(i) __task_get_nx_info(i, __FILE__, __LINE__)
+
+static __inline__ struct nx_info *__task_get_nx_info(struct task_struct *p,
+ const char *_file, int _line)
+{
+ struct nx_info *nxi;
+
+ task_lock(p);
+ vxlprintk(VXD_CBIT(nid, 5), "task_get_nx_info(%p)",
+ p, _file, _line);
+ nxi = __get_nx_info(p->nx_info, _file, _line);
+ task_unlock(p);
+ return nxi;
+}
+
+
+static inline void exit_nx_info(struct task_struct *p)
+{
+ if (p->nx_info)
+ release_nx_info(p->nx_info, p);
+}
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_pid.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_pid.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,95 @@
+#ifndef _VS_PID_H
+#define _VS_PID_H
+
+#include "vserver/base.h"
+#include "vserver/check.h"
+#include "vserver/context.h"
+#include "vserver/debug.h"
+#include "vserver/pid.h"
+#include <linux/pid_namespace.h>
+
+
+#define VXF_FAKE_INIT (VXF_INFO_INIT | VXF_STATE_INIT)
+
+static inline
+int vx_proc_task_visible(struct task_struct *task)
+{
+ if ((task->pid == 1) &&
+ !vx_flags(VXF_FAKE_INIT, VXF_FAKE_INIT))
+ /* show a blend through init */
+ goto visible;
+ if (vx_check(vx_task_xid(task), VS_WATCH | VS_IDENT))
+ goto visible;
+ return 0;
+visible:
+ return 1;
+}
+
+#define find_task_by_real_pid find_task_by_pid
+
+#if 0
+
+static inline
+struct task_struct *vx_find_proc_task_by_pid(int pid)
+{
+ struct task_struct *task = find_task_by_real_pid(pid);
+
+ if (task && !vx_proc_task_visible(task)) {
+ vxdprintk(VXD_CBIT(misc, 6),
+ "dropping task (find) %p[#%u,%u] for %p[#%u,%u]",
+ task, task->xid, task->pid,
+ current, current->xid, current->pid);
+ task = NULL;
+ }
+ return task;
+}
+
+#endif
+
+static inline
+struct task_struct *vx_get_proc_task(struct inode *inode, struct pid *pid)
+{
+ struct task_struct *task = get_pid_task(pid, PIDTYPE_PID);
+
+ if (task && !vx_proc_task_visible(task)) {
+ vxdprintk(VXD_CBIT(misc, 6),
+ "dropping task (get) %p[#%u,%u] for %p[#%u,%u]",
+ task, task->xid, task->pid,
+ current, current->xid, current->pid);
+ put_task_struct(task);
+ task = NULL;
+ }
+ return task;
+}
+
+#if 0
+
+static inline
+struct task_struct *vx_child_reaper(struct task_struct *p)
+{
+ struct vx_info *vxi = p->vx_info;
+ struct task_struct *reaper = child_reaper(p);
+
+ if (!vxi)
+ goto out;
+
+ BUG_ON(!p->vx_info->vx_reaper);
+
+ /* child reaper for the guest reaper */
+ if (vxi->vx_reaper == p)
+ goto out;
+
+ reaper = vxi->vx_reaper;
+out:
+ vxdprintk(VXD_CBIT(xid, 7),
+ "vx_child_reaper(%p[#%u,%u]) = %p[#%u,%u]",
+ p, p->xid, p->pid, reaper, reaper->xid, reaper->pid);
+ return reaper;
+}
+
+#endif
+
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_sched.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_sched.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,110 @@
+#ifndef _VS_SCHED_H
+#define _VS_SCHED_H
+
+#include "vserver/base.h"
+#include "vserver/context.h"
+#include "vserver/sched.h"
+
+
+#define VAVAVOOM_RATIO 50
+
+#define MAX_PRIO_BIAS 20
+#define MIN_PRIO_BIAS -20
+
+
+#ifdef CONFIG_VSERVER_HARDCPU
+
+/*
+ * effective_prio - return the priority that is based on the static
+ * priority but is modified by bonuses/penalties.
+ *
+ * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
+ * into a -4 ... 0 ... +4 bonus/penalty range.
+ *
+ * Additionally, we scale another amount based on the number of
+ * CPU tokens currently held by the context, if the process is
+ * part of a context (and the appropriate SCHED flag is set).
+ * This ranges from -5 ... 0 ... +15, quadratically.
+ *
+ * So, the total bonus is -9 .. 0 .. +19
+ * We use ~50% of the full 0...39 priority range so that:
+ *
+ * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
+ * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
+ * unless that context is far exceeding its CPU allocation.
+ *
+ * Both properties are important to certain workloads.
+ */
+static inline
+int vx_effective_vavavoom(struct _vx_sched_pc *sched_pc, int max_prio)
+{
+ int vavavoom, max;
+
+ /* lots of tokens = lots of vavavoom
+ * no tokens = no vavavoom */
+ if ((vavavoom = sched_pc->tokens) >= 0) {
+ max = sched_pc->tokens_max;
+ vavavoom = max - vavavoom;
+ max = max * max;
+ vavavoom = max_prio * VAVAVOOM_RATIO / 100
+ * (vavavoom*vavavoom - (max >> 2)) / max;
+ return vavavoom;
+ }
+ return 0;
+}
+
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+ struct vx_info *vxi = p->vx_info;
+ struct _vx_sched_pc *sched_pc;
+
+ if (!vxi)
+ return prio;
+
+ sched_pc = &vx_cpu(vxi, sched_pc);
+ if (vx_info_flags(vxi, VXF_SCHED_PRIO, 0)) {
+ int vavavoom = vx_effective_vavavoom(sched_pc, max_user);
+
+ sched_pc->vavavoom = vavavoom;
+ prio += vavavoom;
+ }
+ prio += sched_pc->prio_bias;
+ return prio;
+}
+
+#else /* !CONFIG_VSERVER_HARDCPU */
+
+static inline
+int vx_adjust_prio(struct task_struct *p, int prio, int max_user)
+{
+ struct vx_info *vxi = p->vx_info;
+
+ if (vxi)
+ prio += vx_cpu(vxi, sched_pc).prio_bias;
+ return prio;
+}
+
+#endif /* CONFIG_VSERVER_HARDCPU */
+
+
+static inline void vx_account_user(struct vx_info *vxi,
+ cputime_t cputime, int nice)
+{
+ if (!vxi)
+ return;
+ vx_cpu(vxi, sched_pc).user_ticks += cputime;
+}
+
+static inline void vx_account_system(struct vx_info *vxi,
+ cputime_t cputime, int idle)
+{
+ if (!vxi)
+ return;
+ vx_cpu(vxi, sched_pc).sys_ticks += cputime;
+}
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_socket.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_socket.h 2008-04-23 14:32:00.000000000 -0400
@@ -0,0 +1,67 @@
+#ifndef _VS_SOCKET_H
+#define _VS_SOCKET_H
+
+#include "vserver/debug.h"
+#include "vserver/base.h"
+#include "vserver/cacct.h"
+#include "vserver/context.h"
+#include "vserver/tag.h"
+
+
+/* socket accounting */
+
+#include <linux/socket.h>
+
+static inline int vx_sock_type(int family)
+{
+ switch (family) {
+ case PF_UNSPEC:
+ return VXA_SOCK_UNSPEC;
+ case PF_UNIX:
+ return VXA_SOCK_UNIX;
+ case PF_INET:
+ return VXA_SOCK_INET;
+ case PF_INET6:
+ return VXA_SOCK_INET6;
+ case PF_PACKET:
+ return VXA_SOCK_PACKET;
+ default:
+ return VXA_SOCK_OTHER;
+ }
+}
+
+#define vx_acc_sock(v, f, p, s) \
+ __vx_acc_sock(v, f, p, s, __FILE__, __LINE__)
+
+static inline void __vx_acc_sock(struct vx_info *vxi,
+ int family, int pos, int size, char *file, int line)
+{
+ if (vxi) {
+ int type = vx_sock_type(family);
+
+ atomic_long_inc(&vxi->cacct.sock[type][pos].count);
+ atomic_long_add(size, &vxi->cacct.sock[type][pos].total);
+ }
+}
+
+#define vx_sock_recv(sk, s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 0, s)
+#define vx_sock_send(sk, s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 1, s)
+#define vx_sock_fail(sk, s) \
+ vx_acc_sock((sk)->sk_vx_info, (sk)->sk_family, 2, s)
+
+
+#define sock_vx_init(s) do { \
+ (s)->sk_xid = 0; \
+ (s)->sk_vx_info = NULL; \
+ } while (0)
+
+#define sock_nx_init(s) do { \
+ (s)->sk_nid = 0; \
+ (s)->sk_nx_info = NULL; \
+ } while (0)
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_tag.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_tag.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,43 @@
+#ifndef _VS_TAG_H
+#define _VS_TAG_H
+
+#include <linux/vserver/tag.h>
+
+/* check conditions */
+
+#define DX_ADMIN 0x0001
+#define DX_WATCH 0x0002
+#define DX_HOSTID 0x0008
+
+#define DX_IDENT 0x0010
+
+#define DX_ARG_MASK 0x0010
+
+
+#define dx_task_tag(t) ((t)->tag)
+
+#define dx_current_tag() dx_task_tag(current)
+
+#define dx_check(c, m) __dx_check(dx_current_tag(), c, m)
+
+#define dx_weak_check(c, m) ((m) ? dx_check(c, m) : 1)
+
+
+/*
+ * check current context for ADMIN/WATCH and
+ * optionally against supplied argument
+ */
+static inline int __dx_check(tag_t cid, tag_t id, unsigned int mode)
+{
+ if (mode & DX_ARG_MASK) {
+ if ((mode & DX_IDENT) && (id == cid))
+ return 1;
+ }
+ return (((mode & DX_ADMIN) && (cid == 0)) ||
+ ((mode & DX_WATCH) && (cid == 1)) ||
+ ((mode & DX_HOSTID) && (id == 0)));
+}
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/linux/vs_time.h 1969-12-31 19:00:00.000000000 -0500
+++ a/include/linux/vs_time.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,19 @@
+#ifndef _VS_TIME_H
+#define _VS_TIME_H
+
+
+/* time faking stuff */
+
+#ifdef CONFIG_VSERVER_VTIME
+
+extern void vx_gettimeofday(struct timeval *tv);
+extern int vx_settimeofday(struct timespec *ts);
+
+#else
+#define vx_gettimeofday(t) do_gettimeofday(t)
+#define vx_settimeofday(t) do_settimeofday(t)
+#endif
+
+#else
+#warning duplicate inclusion
+#endif
--- a/include/net/addrconf.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/addrconf.h 2008-04-19 15:14:52.000000000 -0400
@@ -75,10 +75,12 @@ extern struct inet6_ifaddr *ipv6_ge
extern int ipv6_get_saddr(struct dst_entry *dst,
struct in6_addr *daddr,
- struct in6_addr *saddr);
+ struct in6_addr *saddr,
+ struct nx_info *nxi);
extern int ipv6_dev_get_saddr(struct net_device *dev,
struct in6_addr *daddr,
- struct in6_addr *saddr);
+ struct in6_addr *saddr,
+ struct nx_info *nxi);
extern int ipv6_get_lladdr(struct net_device *dev,
struct in6_addr *addr,
unsigned char banned_flags);
--- a/include/net/af_unix.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/af_unix.h 2008-04-19 15:14:52.000000000 -0400
@@ -4,6 +4,7 @@
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/mutex.h>
+#include <linux/vs_base.h>
#include <net/sock.h>
extern void unix_inflight(struct file *fp);
--- a/include/net/inet_sock.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/inet_sock.h 2008-04-19 15:14:52.000000000 -0400
@@ -24,7 +24,7 @@
#include <net/flow.h>
#include <net/sock.h>
#include <net/request_sock.h>
-#include <net/route.h>
+// #include <net/route.h>
/** struct ip_options - IP Options
*
@@ -193,9 +193,4 @@ static inline int inet_sk_ehashfn(const
}
-static inline int inet_iif(const struct sk_buff *skb)
-{
- return ((struct rtable *)skb->dst)->rt_iif;
-}
-
#endif /* _INET_SOCK_H */
--- a/include/net/inet_timewait_sock.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/inet_timewait_sock.h 2008-04-19 15:25:34.000000000 -0400
@@ -15,15 +15,14 @@
#ifndef _INET_TIMEWAIT_SOCK_
#define _INET_TIMEWAIT_SOCK_
+// #include <net/inet_sock.h>
+#include <net/sock.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>
-
-#include <net/inet_sock.h>
-#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/timewait_sock.h>
@@ -116,6 +115,10 @@ struct inet_timewait_sock {
#define tw_hash __tw_common.skc_hash
#define tw_prot __tw_common.skc_prot
#define tw_net __tw_common.skc_net
+#define tw_xid __tw_common.skc_xid
+#define tw_vx_info __tw_common.skc_vx_info
+#define tw_nid __tw_common.skc_nid
+#define tw_nx_info __tw_common.skc_nx_info
int tw_timeout;
volatile unsigned char tw_substate;
/* 3 bits hole, try to pack */
--- a/include/net/route.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/route.h 2008-04-21 12:39:35.000000000 -0400
@@ -34,7 +34,7 @@
#include <linux/ip.h>
#include <linux/cache.h>
#include <linux/security.h>
-#include <net/sock.h>
+#include <linux/in.h>
#ifndef __KERNEL__
#warning This file is not supposed to be used outside of kernel.
@@ -86,6 +86,11 @@ struct ip_rt_acct
__u32 i_packets;
};
+static inline int inet_iif(const struct sk_buff *skb)
+{
+ return ((struct rtable *)skb->dst)->rt_iif;
+}
+
struct rt_cache_stat
{
unsigned int in_hit;
@@ -136,6 +141,9 @@ static inline void ip_rt_put(struct rtab
dst_release(&rt->u.dst);
}
+#include <linux/vs_base.h>
+#include <linux/vs_inet.h>
+
#define IPTOS_RT_MASK (IPTOS_TOS_MASK & ~3)
extern const __u8 ip_tos2prio[16];
@@ -145,6 +153,9 @@ static inline char rt_tos2priority(u8 to
return ip_tos2prio[IPTOS_TOS(tos)>>1];
}
+extern int ip_v4_find_src(struct net *net, struct nx_info *,
+ struct rtable **, struct flowi *);
+
static inline int ip_route_connect(struct rtable **rp, __be32 dst,
__be32 src, u32 tos, int oif, u8 protocol,
__be16 sport, __be16 dport, struct sock *sk,
@@ -162,7 +173,21 @@ static inline int ip_route_connect(struc
int err;
struct net *net = sk->sk_net;
- if (!dst || !src) {
+ struct nx_info *nx_info = current->nx_info;
+
+ if (sk)
+ nx_info = sk->sk_nx_info;
+
+ vxdprintk(VXD_CBIT(net, 4),
+ "ip_route_connect(%p) %p,%p;%lx",
+ sk, nx_info, sk->sk_socket,
+ (sk->sk_socket?sk->sk_socket->flags:0));
+
+ err = ip_v4_find_src(net, nx_info, rp, &fl);
+ if (err)
+ return err;
+
+ if (!fl.fl4_dst || !fl.fl4_src) {
err = __ip_route_output_key(net, rp, &fl);
if (err)
return err;
--- a/include/net/sock.h 2008-04-17 12:05:44.000000000 -0400
+++ a/include/net/sock.h 2008-04-19 15:14:52.000000000 -0400
@@ -123,6 +123,10 @@ struct sock_common {
unsigned int skc_hash;
struct proto *skc_prot;
struct net *skc_net;
+ xid_t skc_xid;
+ struct vx_info *skc_vx_info;
+ nid_t skc_nid;
+ struct nx_info *skc_nx_info;
};
/**
@@ -205,6 +209,10 @@ struct sock {
#define sk_hash __sk_common.skc_hash
#define sk_prot __sk_common.skc_prot
#define sk_net __sk_common.skc_net
+#define sk_xid __sk_common.skc_xid
+#define sk_vx_info __sk_common.skc_vx_info
+#define sk_nid __sk_common.skc_nid
+#define sk_nx_info __sk_common.skc_nx_info
unsigned char sk_shutdown : 2,
sk_no_check : 2,
sk_userlocks : 4;
--- a/init/main.c 2008-04-17 12:05:44.000000000 -0400
+++ a/init/main.c 2008-04-21 10:46:10.000000000 -0400
@@ -58,6 +58,7 @@
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/signal.h>
+#include <linux/vserver/percpu.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -370,12 +371,14 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void __init setup_per_cpu_areas(void)
{
- unsigned long size, i;
+ unsigned long size, vspc, i;
char *ptr;
unsigned long nr_possible_cpus = num_possible_cpus();
+ vspc = PERCPU_PERCTX * CONFIG_VSERVER_CONTEXTS;
+
/* Copy section for each CPU (we discard the original) */
- size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
+ size = ALIGN(PERCPU_ENOUGH_ROOM + vspc, PAGE_SIZE);
ptr = alloc_bootmem_pages(size * nr_possible_cpus);
for_each_possible_cpu(i) {
--- a/ipc/mqueue.c 2008-04-17 12:05:44.000000000 -0400
+++ a/ipc/mqueue.c 2008-04-19 15:14:52.000000000 -0400
@@ -31,6 +31,8 @@
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
#include <net/sock.h>
#include "util.h"
@@ -71,6 +73,7 @@ struct mqueue_inode_info {
struct sigevent notify;
struct pid* notify_owner;
struct user_struct *user; /* user who created, for accounting */
+ struct vx_info *vxi;
struct sock *notify_sock;
struct sk_buff *notify_cookie;
@@ -119,6 +122,7 @@ static struct inode *mqueue_get_inode(st
struct mqueue_inode_info *info;
struct task_struct *p = current;
struct user_struct *u = p->user;
+ struct vx_info *vxi = p->vx_info;
unsigned long mq_bytes, mq_msg_tblsz;
inode->i_fop = &mqueue_file_operations;
@@ -133,6 +137,7 @@ static struct inode *mqueue_get_inode(st
info->notify_owner = NULL;
info->qsize = 0;
info->user = NULL; /* set when all is ok */
+ info->vxi = NULL;
memset(&info->attr, 0, sizeof(info->attr));
info->attr.mq_maxmsg = DFLT_MSGMAX;
info->attr.mq_msgsize = DFLT_MSGSIZEMAX;
@@ -147,22 +152,26 @@ static struct inode *mqueue_get_inode(st
spin_lock(&mq_lock);
if (u->mq_bytes + mq_bytes < u->mq_bytes ||
u->mq_bytes + mq_bytes >
- p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) {
+ p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur ||
+ !vx_ipcmsg_avail(vxi, mq_bytes)) {
spin_unlock(&mq_lock);
goto out_inode;
}
u->mq_bytes += mq_bytes;
+ vx_ipcmsg_add(vxi, u, mq_bytes);
spin_unlock(&mq_lock);
info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL);
if (!info->messages) {
spin_lock(&mq_lock);
u->mq_bytes -= mq_bytes;
+ vx_ipcmsg_sub(vxi, u, mq_bytes);
spin_unlock(&mq_lock);
goto out_inode;
}
/* all is ok */
info->user = get_uid(u);
+ info->vxi = get_vx_info(vxi);
} else if (S_ISDIR(mode)) {
inc_nlink(inode);
/* Some things misbehave if size == 0 on a directory */
@@ -253,10 +262,14 @@ static void mqueue_delete_inode(struct i
(info->attr.mq_maxmsg * info->attr.mq_msgsize));
user = info->user;
if (user) {
+ struct vx_info *vxi = info->vxi;
+
spin_lock(&mq_lock);
user->mq_bytes -= mq_bytes;
+ vx_ipcmsg_sub(vxi, user, mq_bytes);
queues_count--;
spin_unlock(&mq_lock);
+ put_vx_info(vxi);
free_uid(user);
}
}
@@ -743,7 +756,7 @@ asmlinkage long sys_mq_unlink(const char
if (inode)
atomic_inc(&inode->i_count);
- err = vfs_unlink(dentry->d_parent->d_inode, dentry);
+ err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL);
out_err:
dput(dentry);
--- a/ipc/msg.c 2008-04-17 12:05:44.000000000 -0400
+++ a/ipc/msg.c 2008-04-21 10:41:47.000000000 -0400
@@ -37,6 +37,7 @@
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
#include <asm/current.h>
#include <asm/uaccess.h>
@@ -168,6 +169,7 @@ static int newque(struct ipc_namespace *
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
+ msq->q_perm.xid = vx_current_xid();
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
--- a/ipc/namespace.c 2008-04-17 12:05:44.000000000 -0400
+++ a/ipc/namespace.c 2008-04-21 10:44:58.000000000 -0400
@@ -9,6 +9,8 @@
#include <linux/rcupdate.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
+#include <linux/vs_base.h>
+#include <linux/vserver/global.h>
#include "util.h"
@@ -25,6 +27,7 @@ static struct ipc_namespace *clone_ipc_n
shm_init_ns(ns);
kref_init(&ns->kref);
+ atomic_inc(&vs_global_ipc_ns);
return ns;
}
@@ -82,5 +85,6 @@ void free_ipc_ns(struct kref *kref)
sem_exit_ns(ns);
msg_exit_ns(ns);
shm_exit_ns(ns);
+ atomic_dec(&vs_global_ipc_ns);
kfree(ns);
}
--- a/ipc/sem.c 2008-04-17 12:05:44.000000000 -0400
+++ a/ipc/sem.c 2008-04-21 10:45:22.000000000 -0400
@@ -83,6 +83,8 @@
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
#include <asm/uaccess.h>
#include "util.h"
@@ -252,6 +254,7 @@ static int newary(struct ipc_namespace *
sma->sem_perm.mode = (semflg & S_IRWXUGO);
sma->sem_perm.key = key;
+ sma->sem_perm.xid = vx_current_xid();
sma->sem_perm.security = NULL;
retval = security_sem_alloc(sma);
@@ -267,6 +270,9 @@ static int newary(struct ipc_namespace *
return id;
}
ns->used_sems += nsems;
+ /* FIXME: obsoleted? */
+ vx_semary_inc(sma);
+ vx_nsems_add(sma, nsems);
sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq);
sma->sem_base = (struct sem *) &sma[1];
--- a/ipc/shm.c 2008-04-17 12:05:44.000000000 -0400
+++ a/ipc/shm.c 2008-04-21 10:45:38.000000000 -0400
@@ -39,6 +39,8 @@
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
#include <asm/uaccess.h>
@@ -202,7 +204,12 @@ static void shm_open(struct vm_area_stru
*/
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
- ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ struct vx_info *vxi = lookup_vx_info(shp->shm_perm.xid);
+ int numpages = (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
+
+ vx_ipcshm_sub(vxi, shp, numpages);
+ ns->shm_tot -= numpages;
+
shm_rmid(ns, shp);
shm_unlock(shp);
if (!is_file_hugepages(shp->shm_file))
@@ -212,6 +219,7 @@ static void shm_destroy(struct ipc_names
shp->mlock_user);
fput (shp->shm_file);
security_shm_free(shp);
+ put_vx_info(vxi);
ipc_rcu_putref(shp);
}
@@ -383,11 +391,15 @@ static int newseg(struct ipc_namespace *
if (ns->shm_tot + numpages > ns->shm_ctlall)
return -ENOSPC;
+ if (!vx_ipcshm_avail(current->vx_info, numpages))
+ return -ENOSPC;
+
shp = ipc_rcu_alloc(sizeof(*shp));
if (!shp)
return -ENOMEM;
shp->shm_perm.key = key;
+ shp->shm_perm.xid = vx_current_xid();
shp->shm_perm.mode = (shmflg & S_IRWXUGO);
shp->mlock_user = NULL;
@@ -441,6 +453,7 @@ static int newseg(struct ipc_namespace *
ns->shm_tot += numpages;
error = shp->shm_perm.id;
shm_unlock(shp);
+ vx_ipcshm_add(current->vx_info, key, numpages);
return error;
no_id:
--- a/kernel/capability.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/capability.c 2008-04-19 15:14:52.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/pid_namespace.h>
+#include <linux/vs_context.h>
#include <asm/uaccess.h>
/*
@@ -171,6 +172,8 @@ static inline int cap_set_pg(int pgrp_nr
pgrp = find_vpid(pgrp_nr);
do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
+ if (!vx_check(g->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
target = g;
while_each_thread(g, target) {
if (!security_capset_check(target, effective,
@@ -335,8 +338,12 @@ int __capable(struct task_struct *t, int
return 0;
}
+#include <linux/vserver/base.h>
int capable(int cap)
{
+ /* here for now so we don't require task locking */
+ if (vs_check_bit(VXC_CAP_MASK, cap) && !vx_mcaps(1L << cap))
+ return 0;
return __capable(current, cap);
}
EXPORT_SYMBOL(capable);
--- a/kernel/cgroup.c 2008-05-21 14:30:05.000000000 -0400
+++ a/kernel/cgroup.c 2008-05-21 14:30:41.000000000 -0400
@@ -2833,7 +2833,7 @@ int cgroup_clone(struct task_struct *tsk
}
/* Create the cgroup directory, which also creates the cgroup */
- ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
+ ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755, NULL);
child = __d_cgrp(dentry);
dput(dentry);
if (ret) {
--- a/kernel/compat.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/compat.c 2008-04-19 15:14:52.000000000 -0400
@@ -846,7 +846,7 @@ asmlinkage long compat_sys_time(compat_t
compat_time_t i;
struct timeval tv;
- do_gettimeofday(&tv);
+ vx_gettimeofday(&tv);
i = tv.tv_sec;
if (tloc) {
@@ -870,7 +870,7 @@ asmlinkage long compat_sys_stime(compat_
if (err)
return err;
- do_settimeofday(&tv);
+ vx_settimeofday(&tv);
return 0;
}
--- a/kernel/exit.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/exit.c 2008-04-19 15:14:52.000000000 -0400
@@ -44,6 +44,11 @@
#include <linux/resource.h>
#include <linux/blkdev.h>
#include <linux/task_io_accounting_ops.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/global.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -468,9 +473,11 @@ static void close_files(struct files_str
filp_close(file, files);
cond_resched();
}
+ vx_openfd_dec(i);
}
i++;
set >>= 1;
+ cond_resched();
}
}
}
@@ -1014,6 +1021,10 @@ NORET_TYPE void do_exit(long code)
if (tsk->splice_pipe)
__free_pipe_info(tsk->splice_pipe);
+ /* needs to stay after exit_notify() */
+ exit_vx_info(tsk, code);
+ exit_nx_info(tsk);
+
preempt_disable();
/* causes final put_task_struct in finish_task_switch(). */
tsk->state = TASK_DEAD;
--- a/kernel/fork.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/fork.c 2008-04-21 11:09:01.000000000 -0400
@@ -53,6 +53,11 @@
#include <linux/tty.h>
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_memory.h>
+#include <linux/vserver/global.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
@@ -113,6 +118,8 @@ void free_task(struct task_struct *tsk)
prop_local_destroy_single(&tsk->dirties);
free_thread_info(tsk->stack);
rt_mutex_debug_task_free(tsk);
+ clr_vx_info(&tsk->vx_info);
+ clr_nx_info(&tsk->nx_info);
free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);
@@ -229,6 +236,8 @@ static int dup_mmap(struct mm_struct *mm
mm->free_area_cache = oldmm->mmap_base;
mm->cached_hole_size = ~0UL;
mm->map_count = 0;
+ __set_mm_counter(mm, file_rss, 0);
+ __set_mm_counter(mm, anon_rss, 0);
cpus_clear(mm->cpu_vm_mask);
mm->mm_rb = RB_ROOT;
rb_link = &mm->mm_rb.rb_node;
@@ -240,7 +249,7 @@ static int dup_mmap(struct mm_struct *mm
if (mpnt->vm_flags & VM_DONTCOPY) {
long pages = vma_pages(mpnt);
- mm->total_vm -= pages;
+ vx_vmpages_sub(mm, pages);
vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
-pages);
continue;
@@ -351,8 +360,8 @@ static struct mm_struct * mm_init(struct
: MMF_DUMP_FILTER_DEFAULT;
mm->core_waiters = 0;
mm->nr_ptes = 0;
- set_mm_counter(mm, file_rss, 0);
- set_mm_counter(mm, anon_rss, 0);
+ __set_mm_counter(mm, file_rss, 0);
+ __set_mm_counter(mm, anon_rss, 0);
spin_lock_init(&mm->page_table_lock);
rwlock_init(&mm->ioctx_list_lock);
mm->ioctx_list = NULL;
@@ -362,6 +371,7 @@ static struct mm_struct * mm_init(struct
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
+ set_vx_info(&mm->mm_vx_info, p->vx_info);
return mm;
}
@@ -395,6 +405,7 @@ void __mmdrop(struct mm_struct *mm)
BUG_ON(mm == &init_mm);
mm_free_pgd(mm);
destroy_context(mm);
+ clr_vx_info(&mm->mm_vx_info);
free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
@@ -511,6 +522,7 @@ static struct mm_struct *dup_mm(struct t
goto fail_nomem;
memcpy(mm, oldmm, sizeof(*mm));
+ mm->mm_vx_info = NULL;
/* Initializing for Swap token stuff */
mm->token_priority = 0;
@@ -542,6 +554,7 @@ fail_nocontext:
* If init_new_context() failed, we cannot use mmput() to free the mm
* because it calls destroy_context()
*/
+ clr_vx_info(&mm->mm_vx_info);
mm_free_pgd(mm);
free_mm(mm);
return NULL;
@@ -612,6 +625,7 @@ static struct fs_struct *__copy_fs_struc
fs->altroot.dentry = NULL;
}
read_unlock(&old->lock);
+ atomic_inc(&vs_global_fs);
}
return fs;
}
@@ -730,6 +744,8 @@ static struct files_struct *dup_fd(struc
struct file *f = *old_fds++;
if (f) {
get_file(f);
+ /* TODO: sum it first for check and performance */
+ vx_openfd_inc(open_files - i);
} else {
/*
* The fd may be claimed in the fd bitmap but not yet
@@ -1011,6 +1027,8 @@ static struct task_struct *copy_process(
int retval;
struct task_struct *p;
int cgroup_callbacks_done = 0;
+ struct vx_info *vxi;
+ struct nx_info *nxi;
if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
return ERR_PTR(-EINVAL);
@@ -1045,12 +1063,28 @@ static struct task_struct *copy_process(
DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
+ init_vx_info(&p->vx_info, current->vx_info);
+ init_nx_info(&p->nx_info, current->nx_info);
+
+ /* check vserver memory */
+ if (p->mm && !(clone_flags & CLONE_VM)) {
+ if (vx_vmpages_avail(p->mm, p->mm->total_vm))
+ vx_pages_add(p->vx_info, RLIMIT_AS, p->mm->total_vm);
+ else
+ goto bad_fork_free;
+ }
+ if (p->mm && vx_flags(VXF_FORK_RSS, 0)) {
+ if (!vx_rss_avail(p->mm, get_mm_counter(p->mm, file_rss)))
+ goto bad_fork_cleanup_vm;
+ }
retval = -EAGAIN;
+ if (!vx_nproc_avail(1))
+ goto bad_fork_cleanup_vm;
if (atomic_read(&p->user->processes) >=
p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
p->user != current->nsproxy->user_ns->root_user)
- goto bad_fork_free;
+ goto bad_fork_cleanup_vm;
}
atomic_inc(&p->user->__count);
@@ -1352,6 +1386,18 @@ static struct task_struct *copy_process(
total_forks++;
spin_unlock(&current->sighand->siglock);
+
+ /* p is copy of current */
+ vxi = p->vx_info;
+ if (vxi) {
+ claim_vx_info(vxi, p);
+ atomic_inc(&vxi->cvirt.nr_threads);
+ atomic_inc(&vxi->cvirt.total_forks);
+ vx_nproc_inc(p);
+ }
+ nxi = p->nx_info;
+ if (nxi)
+ claim_nx_info(nxi, p);
write_unlock_irq(&tasklist_lock);
proc_fork_connector(p);
cgroup_post_fork(p);
@@ -1398,6 +1444,9 @@ bad_fork_cleanup_count:
put_group_info(p->group_info);
atomic_dec(&p->user->processes);
free_uid(p->user);
+bad_fork_cleanup_vm:
+ if (p->mm && !(clone_flags & CLONE_VM))
+ vx_pages_sub(p->vx_info, RLIMIT_AS, p->mm->total_vm);
bad_fork_free:
free_task(p);
fork_out:
--- a/kernel/kthread.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/kthread.c 2008-04-19 15:14:52.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/vs_pid.h>
#include <asm/semaphore.h>
#define KTHREAD_NICE_LEVEL (-5)
@@ -99,7 +100,7 @@ static void create_kthread(struct kthrea
struct sched_param param = { .sched_priority = 0 };
wait_for_completion(&create->started);
read_lock(&tasklist_lock);
- create->result = find_task_by_pid(pid);
+ create->result = find_task_by_real_pid(pid);
read_unlock(&tasklist_lock);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
--- a/kernel/Makefile 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/Makefile 2008-04-19 15:14:52.000000000 -0400
@@ -11,6 +11,8 @@ obj-y = sched.o fork.o exec_domain.o
hrtimer.o rwsem.o nsproxy.o srcu.o \
notifier.o ksysfs.o pm_qos_params.o
+obj-y += vserver/
+
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-y += time/
--- a/kernel/nsproxy.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/nsproxy.c 2008-04-19 15:14:52.000000000 -0400
@@ -20,6 +20,8 @@
#include <linux/mnt_namespace.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
+#include <linux/vserver/global.h>
+#include <linux/vserver/debug.h>
#include <net/net_namespace.h>
#include <linux/ipc_namespace.h>
@@ -38,6 +40,9 @@ static inline struct nsproxy *clone_nspr
if (ns) {
memcpy(ns, orig, sizeof(struct nsproxy));
atomic_set(&ns->count, 1);
+ vxdprintk(VXD_CBIT(space, 2), "clone_nsproxy(%p[%u] = %p[1]",
+ orig, atomic_read(&orig->count), ns);
+ atomic_inc(&vs_global_nsproxy);
}
return ns;
}
@@ -47,47 +52,51 @@ static inline struct nsproxy *clone_nspr
* Return the newly created nsproxy. Do not attach this to the task,
* leave it to the caller to do proper locking and attach it to task.
*/
-static struct nsproxy *create_new_namespaces(unsigned long flags,
- struct task_struct *tsk, struct fs_struct *new_fs)
+static struct nsproxy *unshare_namespaces(unsigned long flags,
+ struct nsproxy *orig, struct fs_struct *new_fs)
{
struct nsproxy *new_nsp;
int err;
- new_nsp = clone_nsproxy(tsk->nsproxy);
+ vxdprintk(VXD_CBIT(space, 4),
+ "unshare_namespaces(0x%08lx,%p,%p)",
+ flags, orig, new_fs);
+
+ new_nsp = clone_nsproxy(orig);
if (!new_nsp)
return ERR_PTR(-ENOMEM);
- new_nsp->mnt_ns = copy_mnt_ns(flags, tsk->nsproxy->mnt_ns, new_fs);
+ new_nsp->mnt_ns = copy_mnt_ns(flags, orig->mnt_ns, new_fs);
if (IS_ERR(new_nsp->mnt_ns)) {
err = PTR_ERR(new_nsp->mnt_ns);
goto out_ns;
}
- new_nsp->uts_ns = copy_utsname(flags, tsk->nsproxy->uts_ns);
+ new_nsp->uts_ns = copy_utsname(flags, orig->uts_ns);
if (IS_ERR(new_nsp->uts_ns)) {
err = PTR_ERR(new_nsp->uts_ns);
goto out_uts;
}
- new_nsp->ipc_ns = copy_ipcs(flags, tsk->nsproxy->ipc_ns);
+ new_nsp->ipc_ns = copy_ipcs(flags, orig->ipc_ns);
if (IS_ERR(new_nsp->ipc_ns)) {
err = PTR_ERR(new_nsp->ipc_ns);
goto out_ipc;
}
- new_nsp->pid_ns = copy_pid_ns(flags, task_active_pid_ns(tsk));
+ new_nsp->pid_ns = copy_pid_ns(flags, orig->pid_ns);
if (IS_ERR(new_nsp->pid_ns)) {
err = PTR_ERR(new_nsp->pid_ns);
goto out_pid;
}
- new_nsp->user_ns = copy_user_ns(flags, tsk->nsproxy->user_ns);
+ new_nsp->user_ns = copy_user_ns(flags, orig->user_ns);
if (IS_ERR(new_nsp->user_ns)) {
err = PTR_ERR(new_nsp->user_ns);
goto out_user;
}
- new_nsp->net_ns = copy_net_ns(flags, tsk->nsproxy->net_ns);
+ new_nsp->net_ns = copy_net_ns(flags, orig->net_ns);
if (IS_ERR(new_nsp->net_ns)) {
err = PTR_ERR(new_nsp->net_ns);
goto out_net;
@@ -115,6 +124,37 @@ out_ns:
return ERR_PTR(err);
}
+static struct nsproxy *create_new_namespaces(int flags, struct task_struct *tsk,
+ struct fs_struct *new_fs)
+{
+ return unshare_namespaces(flags, tsk->nsproxy, new_fs);
+}
+
+/*
+ * copies the nsproxy, setting refcount to 1, and grabbing a
+ * reference to all contained namespaces.
+ */
+struct nsproxy *copy_nsproxy(struct nsproxy *orig)
+{
+ struct nsproxy *ns = clone_nsproxy(orig);
+
+ if (ns) {
+ if (ns->mnt_ns)
+ get_mnt_ns(ns->mnt_ns);
+ if (ns->uts_ns)
+ get_uts_ns(ns->uts_ns);
+ if (ns->ipc_ns)
+ get_ipc_ns(ns->ipc_ns);
+ if (ns->pid_ns)
+ get_pid_ns(ns->pid_ns);
+ if (ns->user_ns)
+ get_user_ns(ns->user_ns);
+ if (ns->net_ns)
+ get_net(ns->net_ns);
+ }
+ return ns;
+}
+
/*
* called from clone. This now handles copy for nsproxy and all
* namespaces therein.
@@ -122,9 +162,12 @@ out_ns:
int copy_namespaces(unsigned long flags, struct task_struct *tsk)
{
struct nsproxy *old_ns = tsk->nsproxy;
- struct nsproxy *new_ns;
+ struct nsproxy *new_ns = NULL;
int err = 0;
+ vxdprintk(VXD_CBIT(space, 7), "copy_namespaces(0x%08lx,%p[%p])",
+ flags, tsk, old_ns);
+
if (!old_ns)
return 0;
@@ -155,6 +198,9 @@ int copy_namespaces(unsigned long flags,
out:
put_nsproxy(old_ns);
+ vxdprintk(VXD_CBIT(space, 3),
+ "copy_namespaces(0x%08lx,%p[%p]) = %d [%p]",
+ flags, tsk, old_ns, err, new_ns);
return err;
}
@@ -171,6 +217,7 @@ void free_nsproxy(struct nsproxy *ns)
if (ns->user_ns)
put_user_ns(ns->user_ns);
put_net(ns->net_ns);
+ atomic_dec(&vs_global_nsproxy);
kmem_cache_free(nsproxy_cachep, ns);
}
@@ -183,6 +230,10 @@ int unshare_nsproxy_namespaces(unsigned
{
int err = 0;
+ vxdprintk(VXD_CBIT(space, 4),
+ "unshare_nsproxy_namespaces(0x%08lx,[%p])",
+ unshare_flags, current->nsproxy);
+
if (!(unshare_flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
CLONE_NEWUSER | CLONE_NEWNET)))
return 0;
--- a/kernel/pid.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/pid.c 2008-04-29 17:56:00.000000000 -0400
@@ -35,6 +35,8 @@
#include <linux/pid_namespace.h>
#include <linux/init_task.h>
#include <linux/syscalls.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/global.h>
#define pid_hashfn(nr, ns) \
hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
@@ -303,7 +305,7 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
struct pid *find_vpid(int nr)
{
- return find_pid_ns(nr, current->nsproxy->pid_ns);
+ return find_pid_ns(vx_rmap_pid(nr), current->nsproxy->pid_ns);
}
EXPORT_SYMBOL_GPL(find_vpid);
@@ -359,6 +361,9 @@ void transfer_pid(struct task_struct *ol
struct task_struct *pid_task(struct pid *pid, enum pid_type type)
{
struct task_struct *result = NULL;
+
+ if (type == PIDTYPE_REALPID)
+ type = PIDTYPE_PID;
if (pid) {
struct hlist_node *first;
first = rcu_dereference(pid->tasks[type].first);
@@ -388,14 +393,14 @@ EXPORT_SYMBOL(find_task_by_pid);
struct task_struct *find_task_by_vpid(pid_t vnr)
{
- return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
+ return find_task_by_pid_type_ns(PIDTYPE_PID, vx_rmap_pid(vnr),
current->nsproxy->pid_ns);
}
EXPORT_SYMBOL(find_task_by_vpid);
struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
{
- return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
+ return find_task_by_pid_type_ns(PIDTYPE_PID, vx_rmap_pid(nr), ns);
}
EXPORT_SYMBOL(find_task_by_pid_ns);
@@ -430,7 +435,7 @@ struct pid *find_get_pid(pid_t nr)
return pid;
}
-pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+pid_t pid_unmapped_nr_ns(struct pid *pid, struct pid_namespace *ns)
{
struct upid *upid;
pid_t nr = 0;
@@ -443,6 +448,11 @@ pid_t pid_nr_ns(struct pid *pid, struct
return nr;
}
+pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
+{
+ return vx_map_pid(pid_unmapped_nr_ns(pid, ns));
+}
+
pid_t pid_vnr(struct pid *pid)
{
return pid_nr_ns(pid, current->nsproxy->pid_ns);
--- a/kernel/pid_namespace.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/pid_namespace.c 2008-04-23 11:52:08.000000000 -0400
@@ -12,6 +12,7 @@
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
#include <linux/err.h>
+#include <linux/vserver/global.h>
#define BITS_PER_PAGE (PAGE_SIZE*8)
@@ -84,6 +85,7 @@ static struct pid_namespace *create_pid_
goto out_free_map;
kref_init(&ns->kref);
+ atomic_inc(&vs_global_pid_ns);
ns->last_pid = 0;
ns->child_reaper = NULL;
ns->level = level;
@@ -112,6 +114,7 @@ static void destroy_pid_namespace(struct
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
+ atomic_dec(&vs_global_pid_ns);
kmem_cache_free(pid_ns_cachep, ns);
}
--- a/kernel/posix-timers.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/posix-timers.c 2008-04-19 15:14:52.000000000 -0400
@@ -47,6 +47,7 @@
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/module.h>
+#include <linux/vs_context.h>
/*
* Management arrays for POSIX timers. Timers are kept in slab memory
@@ -299,6 +300,12 @@ void do_schedule_next_timer(struct sigin
int posix_timer_event(struct k_itimer *timr,int si_private)
{
+ struct vx_info_save vxis;
+ struct vx_info *vxi;
+ int ret;
+
+ vxi = task_get_vx_info(timr->it_process);
+ enter_vx_info(vxi, &vxis);
memset(&timr->sigq->info, 0, sizeof(siginfo_t));
timr->sigq->info.si_sys_private = si_private;
/* Send signal to the process that owns this timer.*/
@@ -311,11 +318,11 @@ int posix_timer_event(struct k_itimer *t
if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
struct task_struct *leader;
- int ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
- timr->it_process);
+ ret = send_sigqueue(timr->it_sigev_signo, timr->sigq,
+ timr->it_process);
if (likely(ret >= 0))
- return ret;
+ goto out;
timr->it_sigev_notify = SIGEV_SIGNAL;
leader = timr->it_process->group_leader;
@@ -323,8 +330,12 @@ int posix_timer_event(struct k_itimer *t
timr->it_process = leader;
}
- return send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
+ ret = send_group_sigqueue(timr->it_sigev_signo, timr->sigq,
timr->it_process);
+out:
+ leave_vx_info(&vxis);
+ put_vx_info(vxi);
+ return ret;
}
EXPORT_SYMBOL_GPL(posix_timer_event);
--- a/kernel/printk.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/printk.c 2008-04-21 10:59:28.000000000 -0400
@@ -32,6 +32,7 @@
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/syscalls.h>
+#include <linux/vs_cvirt.h>
#include <asm/uaccess.h>
@@ -297,18 +298,13 @@ int do_syslog(int type, char __user *buf
unsigned i, j, limit, count;
int do_clear = 0;
char c;
- int error = 0;
+ int error;
error = security_syslog(type);
if (error)
return error;
- switch (type) {
- case 0: /* Close log */
- break;
- case 1: /* Open log */
- break;
- case 2: /* Read from log */
+ if ((type >= 2) && (type <= 4)) {
error = -EINVAL;
if (!buf || len < 0)
goto out;
@@ -319,6 +315,16 @@ int do_syslog(int type, char __user *buf
error = -EFAULT;
goto out;
}
+ }
+ if (!vx_check(0, VS_ADMIN|VS_WATCH))
+ return vx_do_syslog(type, buf, len);
+
+ switch (type) {
+ case 0: /* Close log */
+ break;
+ case 1: /* Open log */
+ break;
+ case 2: /* Read from log */
error = wait_event_interruptible(log_wait,
(log_start - log_end));
if (error)
@@ -343,16 +349,6 @@ int do_syslog(int type, char __user *buf
do_clear = 1;
/* FALL THRU */
case 3: /* Read last kernel messages */
- error = -EINVAL;
- if (!buf || len < 0)
- goto out;
- error = 0;
- if (!len)
- goto out;
- if (!access_ok(VERIFY_WRITE, buf, len)) {
- error = -EFAULT;
- goto out;
- }
count = len;
if (count > log_buf_len)
count = log_buf_len;
--- a/kernel/ptrace.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/ptrace.c 2008-04-21 10:50:28.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/audit.h>
#include <linux/pid_namespace.h>
#include <linux/syscalls.h>
+#include <linux/vs_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -147,6 +148,11 @@ int __ptrace_may_attach(struct task_stru
dumpable = get_dumpable(task->mm);
if (!dumpable && !capable(CAP_SYS_PTRACE))
return -EPERM;
+ if (!vx_check(task->xid, VS_ADMIN_P|VS_IDENT))
+ return -EPERM;
+ if (!vx_check(task->xid, VS_IDENT) &&
+ !task_vx_flags(task, VXF_STATE_ADMIN, 0))
+ return -EACCES;
return security_ptrace(current, task);
}
@@ -562,6 +568,10 @@ asmlinkage long sys_ptrace(long request,
goto out;
}
+ ret = -EPERM;
+ if (!vx_check(vx_task_xid(child), VS_WATCH_P | VS_IDENT))
+ goto out_put_task_struct;
+
if (request == PTRACE_ATTACH) {
ret = ptrace_attach(child);
/*
--- a/kernel/sched.c 2008-05-21 14:30:05.000000000 -0400
+++ a/kernel/sched.c 2008-05-21 14:30:41.000000000 -0400
@@ -66,6 +66,8 @@
#include <linux/unistd.h>
#include <linux/pagemap.h>
#include <linux/hrtimer.h>
+#include <linux/vs_sched.h>
+#include <linux/vs_cvirt.h>
#include <asm/tlb.h>
#include <asm/irq_regs.h>
@@ -375,6 +377,16 @@ struct root_domain {
static struct root_domain def_root_domain;
#endif
+ unsigned long norm_time;
+ unsigned long idle_time;
+#ifdef CONFIG_VSERVER_IDLETIME
+ int idle_skip;
+#endif
+#ifdef CONFIG_VSERVER_HARDCPU
+ struct list_head hold_queue;
+ unsigned long nr_onhold;
+ int idle_tokens;
+#endif
/*
* This is the main, per-CPU runqueue data structure.
@@ -1366,6 +1378,7 @@ static void set_load_weight(struct task_
static void enqueue_task(struct rq *rq, struct task_struct *p, int wakeup)
{
+ // BUG_ON(p->state & TASK_ONHOLD);
sched_info_queued(p);
p->sched_class->enqueue_task(rq, p, wakeup);
p->se.on_rq = 1;
@@ -1556,6 +1569,9 @@ struct migration_req {
struct completion done;
};
+#include "sched_mon.h"
+
+
/*
* The task's runqueue lock must be held.
* Returns true if you have to wait for migration thread.
@@ -1565,6 +1581,7 @@ migrate_task(struct task_struct *p, int
{
struct rq *rq = task_rq(p);
+ vxm_migrate_task(p, rq, dest_cpu);
/*
* If the task is not on a runqueue (and not running), then
* it is sufficient to simply update the task's cpu field.
@@ -1926,6 +1943,12 @@ static int try_to_wake_up(struct task_st
/* might preempt at this point */
rq = task_rq_lock(p, &flags);
old_state = p->state;
+
+ /* we need to unhold suspended tasks
+ if (old_state & TASK_ONHOLD) {
+ vx_unhold_task(p, rq);
+ old_state = p->state;
+ } */
if (!(old_state & state))
goto out;
if (p->se.on_rq)
@@ -3697,13 +3720,16 @@ unsigned long long task_sched_runtime(st
void account_user_time(struct task_struct *p, cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
cputime64_t tmp;
+ int nice = (TASK_NICE(p) > 0);
p->utime = cputime_add(p->utime, cputime);
+ vx_account_user(vxi, cputime, nice);
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
- if (TASK_NICE(p) > 0)
+ if (nice)
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -3748,6 +3774,7 @@ void account_system_time(struct task_str
cputime_t cputime)
{
struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+ struct vx_info *vxi = p->vx_info; /* p is _always_ current */
struct rq *rq = this_rq();
cputime64_t tmp;
@@ -3755,6 +3782,7 @@ void account_system_time(struct task_str
return account_guest_time(p, cputime);
p->stime = cputime_add(p->stime, cputime);
+ vx_account_system(vxi, cputime, (p == rq->idle));
/* Add system time to cpustat. */
tmp = cputime_to_cputime64(cputime);
@@ -4500,7 +4528,7 @@ asmlinkage long sys_nice(int increment)
nice = 19;
if (increment < 0 && !can_nice(current, nice))
- return -EPERM;
+ return vx_flags(VXF_IGNEG_NICE, 0) ? 0 : -EPERM;
retval = security_task_setnice(current, nice);
if (retval)
--- a/kernel/sched_fair.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/sched_fair.c 2008-04-19 15:14:52.000000000 -0400
@@ -537,6 +537,8 @@ enqueue_entity(struct cfs_rq *cfs_rq, st
check_spread(cfs_rq, se);
if (se != cfs_rq->curr)
__enqueue_entity(cfs_rq, se);
+ if (entity_is_task(se))
+ vx_activate_task(task_of(se));
account_entity_enqueue(cfs_rq, se);
}
@@ -580,6 +582,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, st
if (se != cfs_rq->curr)
__dequeue_entity(cfs_rq, se);
+ if (entity_is_task(se))
+ vx_deactivate_task(task_of(se));
account_entity_dequeue(cfs_rq, se);
}
--- a/kernel/sched_hard.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/sched_hard.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,324 @@
+
+#ifdef CONFIG_VSERVER_IDLELIMIT
+
+/*
+ * vx_idle_resched - reschedule after maxidle
+ */
+static inline
+void vx_idle_resched(struct rq *rq)
+{
+ /* maybe have a better criterion for paused */
+ if (!--rq->idle_tokens && !list_empty(&rq->hold_queue))
+ set_need_resched();
+}
+
+#else /* !CONFIG_VSERVER_IDLELIMIT */
+
+#define vx_idle_resched(rq)
+
+#endif /* CONFIG_VSERVER_IDLELIMIT */
+
+
+
+#ifdef CONFIG_VSERVER_IDLETIME
+
+#define vx_set_rq_min_skip(rq, min) \
+ (rq)->idle_skip = (min)
+
+#define vx_save_min_skip(ret, min, val) \
+ __vx_save_min_skip(ret, min, val)
+
+static inline
+void __vx_save_min_skip(int ret, int *min, int val)
+{
+ if (ret > -2)
+ return;
+ if ((*min > val) || !*min)
+ *min = val;
+}
+
+static inline
+int vx_try_skip(struct rq *rq, int cpu)
+{
+ /* artificially advance time */
+ if (rq->idle_skip > 0) {
+ vxdprintk(list_empty(&rq->hold_queue),
+ "hold queue empty on cpu %d", cpu);
+ rq->idle_time += rq->idle_skip;
+ vxm_idle_skip(rq, cpu);
+ return 1;
+ }
+ return 0;
+}
+
+#else /* !CONFIG_VSERVER_IDLETIME */
+
+#define vx_set_rq_min_skip(rq, min) \
+ ({ int dummy = (min); dummy; })
+
+#define vx_save_min_skip(ret, min, val)
+
+static inline
+int vx_try_skip(struct rq *rq, int cpu)
+{
+ return 0;
+}
+
+#endif /* CONFIG_VSERVER_IDLETIME */
+
+
+
+#ifdef CONFIG_VSERVER_HARDCPU
+
+#define vx_set_rq_max_idle(rq, max) \
+ (rq)->idle_tokens = (max)
+
+#define vx_save_max_idle(ret, min, val) \
+ __vx_save_max_idle(ret, min, val)
+
+static inline
+void __vx_save_max_idle(int ret, int *min, int val)
+{
+ if (*min > val)
+ *min = val;
+}
+
+
+/*
+ * vx_hold_task - put a task on the hold queue
+ */
+static inline
+void vx_hold_task(struct task_struct *p, struct rq *rq)
+{
+ __deactivate_task(p, rq);
+ p->state |= TASK_ONHOLD;
+ /* a new one on hold */
+ rq->nr_onhold++;
+ vxm_hold_task(p, rq);
+ list_add_tail(&p->run_list, &rq->hold_queue);
+}
+
+/*
+ * vx_unhold_task - put a task back to the runqueue
+ */
+static inline
+void vx_unhold_task(struct task_struct *p, struct rq *rq)
+{
+ list_del(&p->run_list);
+ /* one less waiting */
+ rq->nr_onhold--;
+ p->state &= ~TASK_ONHOLD;
+ enqueue_task(p, rq->expired);
+ inc_nr_running(p, rq);
+ vxm_unhold_task(p, rq);
+
+ if (p->static_prio < rq->best_expired_prio)
+ rq->best_expired_prio = p->static_prio;
+}
+
+unsigned long nr_onhold(void)
+{
+ unsigned long i, sum = 0;
+
+ for_each_online_cpu(i)
+ sum += cpu_rq(i)->nr_onhold;
+
+ return sum;
+}
+
+
+
+static inline
+int __vx_tokens_avail(struct _vx_sched_pc *sched_pc)
+{
+ return sched_pc->tokens;
+}
+
+static inline
+void __vx_consume_token(struct _vx_sched_pc *sched_pc)
+{
+ sched_pc->tokens--;
+}
+
+static inline
+int vx_need_resched(struct task_struct *p, int slice, int cpu)
+{
+ struct vx_info *vxi = p->vx_info;
+
+ if (vx_info_flags(vxi, VXF_SCHED_HARD|VXF_SCHED_PRIO, 0)) {
+ struct _vx_sched_pc *sched_pc =
+ &vx_per_cpu(vxi, sched_pc, cpu);
+ int tokens;
+
+ /* maybe we can simplify that to decrement
+ the token counter unconditional? */
+
+ if ((tokens = __vx_tokens_avail(sched_pc)) > 0)
+ __vx_consume_token(sched_pc);
+
+ /* for tokens > 0, one token was consumed */
+ if (tokens < 2)
+ slice = 0;
+ }
+ vxm_need_resched(p, slice, cpu);
+ return (slice == 0);
+}
+
+
+#define vx_set_rq_time(rq, time) do { \
+ rq->norm_time = time; \
+} while (0)
+
+
+static inline
+void vx_try_unhold(struct rq *rq, int cpu)
+{
+ struct vx_info *vxi = NULL;
+ struct list_head *l, *n;
+ int maxidle = HZ;
+ int minskip = 0;
+
+ /* nothing to do? what about pause? */
+ if (list_empty(&rq->hold_queue))
+ return;
+
+ list_for_each_safe(l, n, &rq->hold_queue) {
+ int ret, delta_min[2];
+ struct _vx_sched_pc *sched_pc;
+ struct task_struct *p;
+
+ p = list_entry(l, struct task_struct, run_list);
+ /* don't bother with same context */
+ if (vxi == p->vx_info)
+ continue;
+
+ vxi = p->vx_info;
+ /* ignore paused contexts */
+ if (vx_info_flags(vxi, VXF_SCHED_PAUSE, 0))
+ continue;
+
+ sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
+
+ /* recalc tokens */
+ vxm_sched_info(sched_pc, vxi, cpu);
+ ret = vx_tokens_recalc(sched_pc,
+ &rq->norm_time, &rq->idle_time, delta_min);
+ vxm_tokens_recalc(sched_pc, rq, vxi, cpu);
+
+ if (ret > 0) {
+ /* we found a runable context */
+ vx_unhold_task(p, rq);
+ break;
+ }
+ vx_save_max_idle(ret, &maxidle, delta_min[0]);
+ vx_save_min_skip(ret, &minskip, delta_min[1]);
+ }
+ vx_set_rq_max_idle(rq, maxidle);
+ vx_set_rq_min_skip(rq, minskip);
+ vxm_rq_max_min(rq, cpu);
+}
+
+
+static inline
+int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
+{
+ struct vx_info *vxi = next->vx_info;
+ struct _vx_sched_pc *sched_pc;
+ int delta_min[2];
+ int flags, ret;
+
+ if (!vxi)
+ return 1;
+
+ flags = vxi->vx_flags;
+
+ if (unlikely(vs_check_flags(flags, VXF_SCHED_PAUSE, 0)))
+ goto put_on_hold;
+ if (!vs_check_flags(flags, VXF_SCHED_HARD | VXF_SCHED_PRIO, 0))
+ return 1;
+
+ sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
+#ifdef CONFIG_SMP
+ /* update scheduler params */
+ if (cpu_isset(cpu, vxi->sched.update)) {
+ vx_update_sched_param(&vxi->sched, sched_pc);
+ vxm_update_sched(sched_pc, vxi, cpu);
+ cpu_clear(cpu, vxi->sched.update);
+ }
+#endif
+ vxm_sched_info(sched_pc, vxi, cpu);
+ ret = vx_tokens_recalc(sched_pc,
+ &rq->norm_time, &rq->idle_time, delta_min);
+ vxm_tokens_recalc(sched_pc, rq, vxi, cpu);
+
+ if (!vs_check_flags(flags, VXF_SCHED_HARD, 0))
+ return 1;
+
+ if (unlikely(ret < 0)) {
+ vx_save_max_idle(ret, &rq->idle_tokens, delta_min[0]);
+ vx_save_min_skip(ret, &rq->idle_skip, delta_min[1]);
+ vxm_rq_max_min(rq, cpu);
+ put_on_hold:
+ vx_hold_task(next, rq);
+ return 0;
+ }
+ return 1;
+}
+
+
+#else /* CONFIG_VSERVER_HARDCPU */
+
+static inline
+void vx_hold_task(struct task_struct *p, struct rq *rq)
+{
+ return;
+}
+
+static inline
+void vx_unhold_task(struct task_struct *p, struct rq *rq)
+{
+ return;
+}
+
+unsigned long nr_onhold(void)
+{
+ return 0;
+}
+
+
+static inline
+int vx_need_resched(struct task_struct *p, int slice, int cpu)
+{
+ return (slice == 0);
+}
+
+
+#define vx_set_rq_time(rq, time)
+
+static inline
+void vx_try_unhold(struct rq *rq, int cpu)
+{
+ return;
+}
+
+static inline
+int vx_schedule(struct task_struct *next, struct rq *rq, int cpu)
+{
+ struct vx_info *vxi = next->vx_info;
+ struct _vx_sched_pc *sched_pc;
+ int delta_min[2];
+ int ret;
+
+ if (!vx_info_flags(vxi, VXF_SCHED_PRIO, 0))
+ return 1;
+
+ sched_pc = &vx_per_cpu(vxi, sched_pc, cpu);
+ vxm_sched_info(sched_pc, vxi, cpu);
+ ret = vx_tokens_recalc(sched_pc,
+ &rq->norm_time, &rq->idle_time, delta_min);
+ vxm_tokens_recalc(sched_pc, rq, vxi, cpu);
+ return 1;
+}
+
+#endif /* CONFIG_VSERVER_HARDCPU */
+
--- a/kernel/sched_mon.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/sched_mon.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,200 @@
+
+#include <linux/vserver/monitor.h>
+
+#ifdef CONFIG_VSERVER_MONITOR
+
+#ifdef CONFIG_VSERVER_HARDCPU
+#define HARDCPU(x) (x)
+#else
+#define HARDCPU(x) (0)
+#endif
+
+#ifdef CONFIG_VSERVER_IDLETIME
+#define IDLETIME(x) (x)
+#else
+#define IDLETIME(x) (0)
+#endif
+
+struct _vx_mon_entry *vxm_advance(int cpu);
+
+
+static inline
+void __vxm_basic(struct _vx_mon_entry *entry, xid_t xid, int type)
+{
+ entry->type = type;
+ entry->xid = xid;
+}
+
+static inline
+void __vxm_sync(int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ __vxm_basic(entry, 0, VXM_SYNC);
+ entry->ev.sec = xtime.tv_sec;
+ entry->ev.nsec = xtime.tv_nsec;
+}
+
+static inline
+void __vxm_task(struct task_struct *p, int type)
+{
+ struct _vx_mon_entry *entry = vxm_advance(task_cpu(p));
+
+ __vxm_basic(entry, p->xid, type);
+ entry->ev.tsk.pid = p->pid;
+ entry->ev.tsk.state = p->state;
+}
+
+static inline
+void __vxm_sched(struct _vx_sched_pc *s, struct vx_info *vxi, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ __vxm_basic(entry, vxi->vx_id, (VXM_SCHED | s->flags));
+ entry->sd.tokens = s->tokens;
+ entry->sd.norm_time = s->norm_time;
+ entry->sd.idle_time = s->idle_time;
+}
+
+static inline
+void __vxm_rqinfo1(struct rq *q, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ entry->type = VXM_RQINFO_1;
+ entry->xid = ((unsigned long)q >> 16) & 0xffff;
+ entry->q1.running = q->nr_running;
+ entry->q1.onhold = HARDCPU(q->nr_onhold);
+ entry->q1.iowait = atomic_read(&q->nr_iowait);
+ entry->q1.uintr = q->nr_uninterruptible;
+ entry->q1.idle_tokens = IDLETIME(q->idle_tokens);
+}
+
+static inline
+void __vxm_rqinfo2(struct rq *q, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ entry->type = VXM_RQINFO_2;
+ entry->xid = (unsigned long)q & 0xffff;
+ entry->q2.norm_time = q->norm_time;
+ entry->q2.idle_time = q->idle_time;
+ entry->q2.idle_skip = IDLETIME(q->idle_skip);
+}
+
+static inline
+void __vxm_update(struct _vx_sched_pc *s, struct vx_info *vxi, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ __vxm_basic(entry, vxi->vx_id, VXM_UPDATE);
+ entry->ev.tokens = s->tokens;
+}
+
+static inline
+void __vxm_update1(struct _vx_sched_pc *s, struct vx_info *vxi, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ __vxm_basic(entry, vxi->vx_id, VXM_UPDATE_1);
+ entry->u1.tokens_max = s->tokens_max;
+ entry->u1.fill_rate = s->fill_rate[0];
+ entry->u1.interval = s->interval[0];
+}
+
+static inline
+void __vxm_update2(struct _vx_sched_pc *s, struct vx_info *vxi, int cpu)
+{
+ struct _vx_mon_entry *entry = vxm_advance(cpu);
+
+ __vxm_basic(entry, vxi->vx_id, VXM_UPDATE_2);
+ entry->u2.tokens_min = s->tokens_min;
+ entry->u2.fill_rate = s->fill_rate[1];
+ entry->u2.interval = s->interval[1];
+}
+
+
+#define vxm_activate_task(p,q) __vxm_task(p, VXM_ACTIVATE)
+#define vxm_activate_idle(p,q) __vxm_task(p, VXM_IDLE)
+#define vxm_deactivate_task(p,q) __vxm_task(p, VXM_DEACTIVATE)
+#define vxm_hold_task(p,q) __vxm_task(p, VXM_HOLD)
+#define vxm_unhold_task(p,q) __vxm_task(p, VXM_UNHOLD)
+
+static inline
+void vxm_migrate_task(struct task_struct *p, struct rq *rq, int dest)
+{
+ __vxm_task(p, VXM_MIGRATE);
+ __vxm_rqinfo1(rq, task_cpu(p));
+ __vxm_rqinfo2(rq, task_cpu(p));
+}
+
+static inline
+void vxm_idle_skip(struct rq *rq, int cpu)
+{
+ __vxm_rqinfo1(rq, cpu);
+ __vxm_rqinfo2(rq, cpu);
+}
+
+static inline
+void vxm_need_resched(struct task_struct *p, int slice, int cpu)
+{
+ if (slice)
+ return;
+
+ __vxm_task(p, VXM_RESCHED);
+}
+
+static inline
+void vxm_sync(unsigned long now, int cpu)
+{
+ if (!CONFIG_VSERVER_MONITOR_SYNC ||
+ (now % CONFIG_VSERVER_MONITOR_SYNC))
+ return;
+
+ __vxm_sync(cpu);
+}
+
+#define vxm_sched_info(s,v,c) __vxm_sched(s,v,c)
+
+static inline
+void vxm_tokens_recalc(struct _vx_sched_pc *s, struct rq *rq,
+ struct vx_info *vxi, int cpu)
+{
+ __vxm_sched(s, vxi, cpu);
+ __vxm_rqinfo2(rq, cpu);
+}
+
+static inline
+void vxm_update_sched(struct _vx_sched_pc *s, struct vx_info *vxi, int cpu)
+{
+ __vxm_sched(s, vxi, cpu);
+ __vxm_update(s, vxi, cpu);
+ __vxm_update1(s, vxi, cpu);
+ __vxm_update2(s, vxi, cpu);
+}
+
+static inline
+void vxm_rq_max_min(struct rq *rq, int cpu)
+{
+ __vxm_rqinfo1(rq, cpu);
+ __vxm_rqinfo2(rq, cpu);
+}
+
+#else /* CONFIG_VSERVER_MONITOR */
+
+#define vxm_activate_task(t,q) do { } while (0)
+#define vxm_activate_idle(t,q) do { } while (0)
+#define vxm_deactivate_task(t,q) do { } while (0)
+#define vxm_hold_task(t,q) do { } while (0)
+#define vxm_unhold_task(t,q) do { } while (0)
+#define vxm_migrate_task(t,q,d) do { } while (0)
+#define vxm_idle_skip(q,c) do { } while (0)
+#define vxm_need_resched(t,s,c) do { } while (0)
+#define vxm_sync(s,c) do { } while (0)
+#define vxm_sched_info(s,v,c) do { } while (0)
+#define vxm_tokens_recalc(s,q,v,c) do { } while (0)
+#define vxm_update_sched(s,v,c) do { } while (0)
+#define vxm_rq_max_min(q,c) do { } while (0)
+
+#endif /* CONFIG_VSERVER_MONITOR */
+
--- a/kernel/signal.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/signal.c 2008-05-15 15:41:03.000000000 -0400
@@ -26,6 +26,8 @@
#include <linux/freezer.h>
#include <linux/pid_namespace.h>
#include <linux/nsproxy.h>
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
#include <asm/param.h>
#include <asm/uaccess.h>
@@ -530,6 +532,14 @@ static int check_kill_permission(int sig
if (!valid_signal(sig))
return error;
+ if ((info != SEND_SIG_NOINFO) &&
+ (is_si_special(info) || !SI_FROMUSER(info)))
+ goto skip;
+
+ vxdprintk(VXD_CBIT(misc, 7),
+ "check_kill_permission(%d,%p,%p[#%u,%u])",
+ sig, info, t, vx_task_xid(t), t->pid);
+
if (info == SEND_SIG_NOINFO || (!is_si_special(info) && SI_FROMUSER(info))) {
error = audit_signal_info(sig, t); /* Let audit system see the signal */
if (error)
@@ -543,6 +553,18 @@ static int check_kill_permission(int sig
return error;
}
+ error = -EPERM;
+ if (t->pid == 1 && current->xid)
+ return error;
+
+ error = -ESRCH;
+ if (!vx_check(vx_task_xid(t), VS_WATCH_P | VS_IDENT)) {
+ vxdprintk(current->xid || VXD_CBIT(misc, 7),
+ "signal %d[%p] xid mismatch %p[#%u,%u] xid=#%u",
+ sig, info, t, vx_task_xid(t), t->pid, current->xid);
+ return error;
+ }
+skip:
return security_task_kill(t, info, sig, 0);
}
@@ -1088,7 +1110,7 @@ int kill_pid_info_as_uid(int sig, struct
read_lock(&tasklist_lock);
p = pid_task(pid, PIDTYPE_PID);
- if (!p) {
+ if (!p || !vx_check(vx_task_xid(p), VS_IDENT)) {
ret = -ESRCH;
goto out_unlock;
}
@@ -1140,7 +1162,9 @@ static int kill_something_info(int sig,
struct task_struct * p;
for_each_process(p) {
- if (p->pid > 1 && !same_thread_group(p, current)) {
+ if (vx_check(vx_task_xid(p), VS_ADMIN|VS_IDENT) &&
+ p->pid > 1 && !same_thread_group(p, current) &&
+ !vx_current_initpid(p->pid)) {
int err = group_send_sig_info(sig, info, p);
++count;
if (err != -EPERM)
@@ -1842,6 +1866,11 @@ relock:
if (is_global_init(current))
continue;
+ /* virtual init is protected against user signals */
+ if ((info->si_code == SI_USER) &&
+ vx_current_initpid(current->pid))
+ continue;
+
if (sig_kernel_stop(signr)) {
/*
* The default action is to stop all threads in
--- a/kernel/softirq.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/softirq.c 2008-04-19 15:14:52.000000000 -0400
@@ -21,6 +21,7 @@
#include <linux/rcupdate.h>
#include <linux/smp.h>
#include <linux/tick.h>
+#include <linux/vs_context.h>
#include <asm/irq.h>
/*
--- a/kernel/sys.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/sys.c 2008-04-19 15:14:52.000000000 -0400
@@ -38,6 +38,7 @@
#include <linux/syscalls.h>
#include <linux/kprobes.h>
#include <linux/user_namespace.h>
+#include <linux/vs_pid.h>
#include <asm/uaccess.h>
#include <asm/io.h>
@@ -116,7 +117,10 @@ static int set_one_prio(struct task_stru
goto out;
}
if (niceval < task_nice(p) && !can_nice(p, niceval)) {
- error = -EACCES;
+ if (vx_flags(VXF_IGNEG_NICE, 0))
+ error = 0;
+ else
+ error = -EACCES;
goto out;
}
no_nice = security_task_setnice(p, niceval);
@@ -164,6 +168,8 @@ asmlinkage long sys_setpriority(int whic
else
pgrp = task_pgrp(current);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
error = set_one_prio(p, niceval, error);
} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
break;
@@ -224,6 +230,8 @@ asmlinkage long sys_getpriority(int whic
else
pgrp = task_pgrp(current);
do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
+ if (!vx_check(p->xid, VS_ADMIN_P | VS_IDENT))
+ continue;
niceval = 20 - task_nice(p);
if (niceval > retval)
retval = niceval;
@@ -353,6 +361,9 @@ void kernel_power_off(void)
machine_power_off();
}
EXPORT_SYMBOL_GPL(kernel_power_off);
+
+long vs_reboot(unsigned int, void __user *);
+
/*
* Reboot system call: for obvious reasons only root may call it,
* and even root needs to set up some magic numbers in the registers
@@ -383,6 +394,9 @@ asmlinkage long sys_reboot(int magic1, i
if ((cmd == LINUX_REBOOT_CMD_POWER_OFF) && !pm_power_off)
cmd = LINUX_REBOOT_CMD_HALT;
+ if (!vx_check(0, VS_ADMIN|VS_WATCH))
+ return vs_reboot(cmd, arg);
+
lock_kernel();
switch (cmd) {
case LINUX_REBOOT_CMD_RESTART:
@@ -1343,7 +1357,7 @@ asmlinkage long sys_sethostname(char __u
int errno;
char tmp[__NEW_UTS_LEN];
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
@@ -1388,7 +1402,7 @@ asmlinkage long sys_setdomainname(char _
int errno;
char tmp[__NEW_UTS_LEN];
- if (!capable(CAP_SYS_ADMIN))
+ if (!vx_capable(CAP_SYS_ADMIN, VXC_SET_UTSNAME))
return -EPERM;
if (len < 0 || len > __NEW_UTS_LEN)
return -EINVAL;
@@ -1455,7 +1469,7 @@ asmlinkage long sys_setrlimit(unsigned i
return -EINVAL;
old_rlim = current->signal->rlim + resource;
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
- !capable(CAP_SYS_RESOURCE))
+ !vx_capable(CAP_SYS_RESOURCE, VXC_SET_RLIMIT))
return -EPERM;
if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;
--- a/kernel/sysctl.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/sysctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -107,6 +107,7 @@ static int ngroups_max = NGROUPS_MAX;
#ifdef CONFIG_KMOD
extern char modprobe_path[];
#endif
+extern char vshelper_path[];
#ifdef CONFIG_CHR_DEV_SG
extern int sg_big_buff;
#endif
@@ -492,6 +493,15 @@ static struct ctl_table kern_table[] = {
.strategy = &sysctl_string,
},
#endif
+ {
+ .ctl_name = KERN_VSHELPER,
+ .procname = "vshelper",
+ .data = &vshelper_path,
+ .maxlen = 256,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ .strategy = &sysctl_string,
+ },
#ifdef CONFIG_CHR_DEV_SG
{
.ctl_name = KERN_SG_BIG_BUFF,
--- a/kernel/sysctl_check.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/sysctl_check.c 2008-04-21 13:42:56.000000000 -0400
@@ -39,6 +39,7 @@ static const struct trans_ctl_table tran
{ KERN_PANIC, "panic" },
{ KERN_REALROOTDEV, "real-root-dev" },
+ { KERN_VSHELPER, "vshelper", },
{ KERN_SPARC_REBOOT, "reboot-cmd" },
{ KERN_CTLALTDEL, "ctrl-alt-del" },
@@ -1217,6 +1218,22 @@ static const struct trans_ctl_table tran
{}
};
+static struct trans_ctl_table trans_vserver_table[] = {
+ { 1, "debug_switch" },
+ { 2, "debug_xid" },
+ { 3, "debug_nid" },
+ { 4, "debug_tag" },
+ { 5, "debug_net" },
+ { 6, "debug_limit" },
+ { 7, "debug_cres" },
+ { 8, "debug_dlim" },
+ { 9, "debug_quota" },
+ { 10, "debug_cvirt" },
+ { 11, "debug_space" },
+ { 12, "debug_misc" },
+ {}
+};
+
static const struct trans_ctl_table trans_root_table[] = {
{ CTL_KERN, "kernel", trans_kern_table },
{ CTL_VM, "vm", trans_vm_table },
@@ -1233,6 +1250,7 @@ static const struct trans_ctl_table tran
{ CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
{ CTL_PM, "pm", trans_pm_table },
{ CTL_FRV, "frv", trans_frv_table },
+ { CTL_VSERVER, "vserver", trans_vserver_table },
{}
};
--- a/kernel/time.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/time.c 2008-04-19 15:14:52.000000000 -0400
@@ -60,6 +60,7 @@ EXPORT_SYMBOL(sys_tz);
asmlinkage long sys_time(time_t __user * tloc)
{
time_t i = get_seconds();
+/* FIXME: do_gettimeofday(&tv) -> vx_gettimeofday(&tv) */
if (tloc) {
if (put_user(i,tloc))
@@ -89,7 +90,7 @@ asmlinkage long sys_stime(time_t __user
if (err)
return err;
- do_settimeofday(&tv);
+ vx_settimeofday(&tv);
return 0;
}
@@ -100,7 +101,7 @@ asmlinkage long sys_gettimeofday(struct
{
if (likely(tv != NULL)) {
struct timeval ktv;
- do_gettimeofday(&ktv);
+ vx_gettimeofday(&ktv);
if (copy_to_user(tv, &ktv, sizeof(ktv)))
return -EFAULT;
}
@@ -175,7 +176,7 @@ int do_sys_settimeofday(struct timespec
/* SMP safe, again the code in arch/foo/time.c should
* globally block out interrupts when it runs.
*/
- return do_settimeofday(tv);
+ return vx_settimeofday(tv);
}
return 0;
}
@@ -307,7 +308,7 @@ void getnstimeofday(struct timespec *tv)
{
struct timeval x;
- do_gettimeofday(&x);
+ vx_gettimeofday(&x);
tv->tv_sec = x.tv_sec;
tv->tv_nsec = x.tv_usec * NSEC_PER_USEC;
}
--- a/kernel/timer.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/timer.c 2008-04-19 15:14:52.000000000 -0400
@@ -37,6 +37,10 @@
#include <linux/delay.h>
#include <linux/tick.h>
#include <linux/kallsyms.h>
+#include <linux/vs_base.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/sched.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
@@ -955,12 +959,6 @@ asmlinkage unsigned long sys_alarm(unsig
#endif
-#ifndef __alpha__
-
-/*
- * The Alpha uses getxpid, getxuid, and getxgid instead. Maybe this
- * should be moved into arch/i386 instead?
- */
/**
* sys_getpid - return the thread group id of the current process
@@ -989,10 +987,23 @@ asmlinkage long sys_getppid(void)
rcu_read_lock();
pid = task_tgid_vnr(current->real_parent);
rcu_read_unlock();
+ return vx_map_pid(pid);
+}
- return pid;
+#ifdef __alpha__
+
+/*
+ * The Alpha uses getxpid, getxuid, and getxgid instead.
+ */
+
+asmlinkage long do_getxpid(long *ppid)
+{
+ *ppid = sys_getppid();
+ return sys_getpid();
}
+#else /* _alpha_ */
+
asmlinkage long sys_getuid(void)
{
/* Only we change this so SMP safe */
@@ -1160,6 +1171,8 @@ int do_sysinfo(struct sysinfo *info)
tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
tp.tv_sec++;
}
+ if (vx_flags(VXF_VIRT_UPTIME, 0))
+ vx_vsi_uptime(&tp, NULL);
info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
--- a/kernel/user.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/user.c 2008-04-23 16:24:56.000000000 -0400
@@ -219,14 +219,15 @@ static struct kobj_type uids_ktype = {
};
/* create /sys/kernel/uids/<uid>/cpu_share file for this user */
-static int uids_user_create(struct user_struct *up)
+static int uids_user_create(struct user_namespace *ns, struct user_struct *up)
{
struct kobject *kobj = &up->kobj;
int error;
memset(kobj, 0, sizeof(struct kobject));
kobj->kset = uids_kset;
- error = kobject_init_and_add(kobj, &uids_ktype, NULL, "%d", up->uid);
+ error = kobject_init_and_add(kobj, &uids_ktype, NULL,
+ "%p:%d", ns, up->uid);
if (error) {
kobject_put(kobj);
goto done;
@@ -248,7 +249,7 @@ int __init uids_sysfs_init(void)
if (!uids_kset)
return -ENOMEM;
- return uids_user_create(&root_user);
+ return uids_user_create(NULL, &root_user);
}
/* work function to remove sysfs directory for a user and free up
@@ -308,7 +309,8 @@ static inline void free_user(struct user
#else /* CONFIG_USER_SCHED && CONFIG_SYSFS */
int uids_sysfs_init(void) { return 0; }
-static inline int uids_user_create(struct user_struct *up) { return 0; }
+static inline int uids_user_create(struct user_namespace *ns,
+ struct user_struct *up) { return 0; }
static inline void uids_mutex_lock(void) { }
static inline void uids_mutex_unlock(void) { }
@@ -399,7 +401,7 @@ struct user_struct * alloc_uid(struct us
if (sched_create_user(new) < 0)
goto out_put_keys;
- if (uids_user_create(new))
+ if (uids_user_create(ns, new))
goto out_destoy_sched;
/*
--- a/kernel/user_namespace.c 2008-04-17 12:05:44.000000000 -0400
+++ a/kernel/user_namespace.c 2008-04-19 15:14:52.000000000 -0400
@@ -9,6 +9,7 @@
#include <linux/version.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/vserver/global.h>
/*
* Clone a new ns copying an original user ns, setting refcount to 1
@@ -26,6 +27,7 @@ static struct user_namespace *clone_user
return ERR_PTR(-ENOMEM);
kref_init(&ns->kref);
+ atomic_inc(&vs_global_user_ns);
for (n = 0; n < UIDHASH_SZ; ++n)
INIT_HLIST_HEAD(ns->uidhash_table + n);
@@ -71,5 +73,6 @@ void free_user_ns(struct kref *kref)
ns = container_of(kref, struct user_namespace, kref);
release_uids(ns);
+ atomic_dec(&vs_global_user_ns);
kfree(ns);
}
--- a/kernel/utsname.c 2008-04-17 10:37:25.000000000 -0400
+++ a/kernel/utsname.c 2008-04-19 15:14:52.000000000 -0400
@@ -14,6 +14,7 @@
#include <linux/utsname.h>
#include <linux/version.h>
#include <linux/err.h>
+#include <linux/vserver/global.h>
/*
* Clone a new ns copying an original utsname, setting refcount to 1
@@ -32,6 +33,7 @@ static struct uts_namespace *clone_uts_n
memcpy(&ns->name, &old_ns->name, sizeof(ns->name));
up_read(&uts_sem);
kref_init(&ns->kref);
+ atomic_inc(&vs_global_uts_ns);
return ns;
}
@@ -62,5 +64,6 @@ void free_uts_ns(struct kref *kref)
struct uts_namespace *ns;
ns = container_of(kref, struct uts_namespace, kref);
+ atomic_dec(&vs_global_uts_ns);
kfree(ns);
}
--- a/kernel/vserver/cacct.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cacct.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,42 @@
+/*
+ * linux/kernel/vserver/cacct.c
+ *
+ * Virtual Server: Context Accounting
+ *
+ * Copyright (C) 2006-2007 Herbert Pötzl
+ *
+ * V0.01 added accounting stats
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/vs_context.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/cacct_int.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+
+
+int vc_sock_stat(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_sock_stat_v0 vc_data;
+ int j, field;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ field = vc_data.field;
+ if ((field < 0) || (field >= VXA_SOCK_SIZE))
+ return -EINVAL;
+
+ for (j = 0; j < 3; j++) {
+ vc_data.count[j] = vx_sock_count(&vxi->cacct, field, j);
+ vc_data.total[j] = vx_sock_total(&vxi->cacct, field, j);
+ }
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
--- a/kernel/vserver/cacct_init.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cacct_init.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,25 @@
+
+
+static inline void vx_info_init_cacct(struct _vx_cacct *cacct)
+{
+ int i, j;
+
+
+ for (i = 0; i < VXA_SOCK_SIZE; i++) {
+ for (j = 0; j < 3; j++) {
+ atomic_set(&cacct->sock[i][j].count, 0);
+ atomic_set(&cacct->sock[i][j].total, 0);
+ }
+ }
+ for (i = 0; i < 8; i++)
+ atomic_set(&cacct->slab[i], 0);
+ for (i = 0; i < 5; i++)
+ for (j = 0; j < 4; j++)
+ atomic_set(&cacct->page[i][j], 0);
+}
+
+static inline void vx_info_exit_cacct(struct _vx_cacct *cacct)
+{
+ return;
+}
+
--- a/kernel/vserver/cacct_proc.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cacct_proc.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,53 @@
+#ifndef _VX_CACCT_PROC_H
+#define _VX_CACCT_PROC_H
+
+#include <linux/vserver/cacct_int.h>
+
+
+#define VX_SOCKA_TOP \
+ "Type\t recv #/bytes\t\t send #/bytes\t\t fail #/bytes\n"
+
+static inline int vx_info_proc_cacct(struct _vx_cacct *cacct, char *buffer)
+{
+ int i, j, length = 0;
+ static char *type[VXA_SOCK_SIZE] = {
+ "UNSPEC", "UNIX", "INET", "INET6", "PACKET", "OTHER"
+ };
+
+ length += sprintf(buffer + length, VX_SOCKA_TOP);
+ for (i = 0; i < VXA_SOCK_SIZE; i++) {
+ length += sprintf(buffer + length, "%s:", type[i]);
+ for (j = 0; j < 3; j++) {
+ length += sprintf(buffer + length,
+ "\t%10lu/%-10lu",
+ vx_sock_count(cacct, i, j),
+ vx_sock_total(cacct, i, j));
+ }
+ buffer[length++] = '\n';
+ }
+
+ length += sprintf(buffer + length, "\n");
+ length += sprintf(buffer + length,
+ "slab:\t %8u %8u %8u %8u\n",
+ atomic_read(&cacct->slab[1]),
+ atomic_read(&cacct->slab[4]),
+ atomic_read(&cacct->slab[0]),
+ atomic_read(&cacct->slab[2]));
+
+ length += sprintf(buffer + length, "\n");
+ for (i = 0; i < 5; i++) {
+ length += sprintf(buffer + length,
+ "page[%d]: %8u %8u %8u %8u\t %8u %8u %8u %8u\n", i,
+ atomic_read(&cacct->page[i][0]),
+ atomic_read(&cacct->page[i][1]),
+ atomic_read(&cacct->page[i][2]),
+ atomic_read(&cacct->page[i][3]),
+ atomic_read(&cacct->page[i][4]),
+ atomic_read(&cacct->page[i][5]),
+ atomic_read(&cacct->page[i][6]),
+ atomic_read(&cacct->page[i][7]));
+ }
+ return length;
+}
+
+#endif /* _VX_CACCT_PROC_H */
--- a/kernel/vserver/context.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/context.c 2008-04-23 22:26:24.000000000 -0400
@@ -0,0 +1,1005 @@
+/*
+ * linux/kernel/vserver/context.c
+ *
+ * Virtual Server: Context Support
+ *
+ * Copyright (C) 2003-2007 Herbert Pötzl
+ *
+ * V0.01 context helper
+ * V0.02 vx_ctx_kill syscall command
+ * V0.03 replaced context_info calls
+ * V0.04 redesign of struct (de)alloc
+ * V0.05 rlimit basic implementation
+ * V0.06 task_xid and info commands
+ * V0.07 context flags and caps
+ * V0.08 switch to RCU based hash
+ * V0.09 revert to non RCU for now
+ * V0.10 and back to working RCU hash
+ * V0.11 and back to locking again
+ * V0.12 referenced context store
+ * V0.13 separate per cpu data
+ * V0.14 changed vcmds to vxi arg
+ * V0.15 added context stat
+ * V0.16 have __create claim() the vxi
+ * V0.17 removed older and legacy stuff
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/security.h>
+#include <linux/pid_namespace.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/network.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_int.h>
+#include <linux/vserver/space.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/context_cmd.h>
+
+#include "cvirt_init.h"
+#include "cacct_init.h"
+#include "limit_init.h"
+#include "sched_init.h"
+
+
+atomic_t vx_global_ctotal = ATOMIC_INIT(0);
+atomic_t vx_global_cactive = ATOMIC_INIT(0);
+
+
+/* now inactive context structures */
+
+static struct hlist_head vx_info_inactive = HLIST_HEAD_INIT;
+
+static spinlock_t vx_info_inactive_lock = SPIN_LOCK_UNLOCKED;
+
+
+/* __alloc_vx_info()
+
+ * allocate an initialized vx_info struct
+ * doesn't make it visible (hash) */
+
+static struct vx_info *__alloc_vx_info(xid_t xid)
+{
+ struct vx_info *new = NULL;
+ int cpu;
+
+ vxdprintk(VXD_CBIT(xid, 0), "alloc_vx_info(%d)*", xid);
+
+ /* would this benefit from a slab cache? */
+ new = kmalloc(sizeof(struct vx_info), GFP_KERNEL);
+ if (!new)
+ return 0;
+
+ memset(new, 0, sizeof(struct vx_info));
+#ifdef CONFIG_SMP
+ new->ptr_pc = alloc_percpu(struct _vx_info_pc);
+ if (!new->ptr_pc)
+ goto error;
+#endif
+ new->vx_id = xid;
+ INIT_HLIST_NODE(&new->vx_hlist);
+ atomic_set(&new->vx_usecnt, 0);
+ atomic_set(&new->vx_tasks, 0);
+ new->vx_parent = NULL;
+ new->vx_state = 0;
+ init_waitqueue_head(&new->vx_wait);
+
+ /* prepare reaper */
+ get_task_struct(init_pid_ns.child_reaper);
+ new->vx_reaper = init_pid_ns.child_reaper;
+ new->vx_badness_bias = 0;
+
+ /* rest of init goes here */
+ vx_info_init_limit(&new->limit);
+ vx_info_init_sched(&new->sched);
+ vx_info_init_cvirt(&new->cvirt);
+ vx_info_init_cacct(&new->cacct);
+
+ /* per cpu data structures */
+ for_each_possible_cpu(cpu) {
+ vx_info_init_sched_pc(
+ &vx_per_cpu(new, sched_pc, cpu), cpu);
+ vx_info_init_cvirt_pc(
+ &vx_per_cpu(new, cvirt_pc, cpu), cpu);
+ }
+
+ new->vx_flags = VXF_INIT_SET;
+ cap_set_init_eff(new->vx_bcaps);
+ new->vx_ccaps = 0;
+ // new->vx_cap_bset = current->cap_bset;
+
+ new->reboot_cmd = 0;
+ new->exit_code = 0;
+
+ vxdprintk(VXD_CBIT(xid, 0),
+ "alloc_vx_info(%d) = %p", xid, new);
+ vxh_alloc_vx_info(new);
+ atomic_inc(&vx_global_ctotal);
+ return new;
+#ifdef CONFIG_SMP
+error:
+ kfree(new);
+ return 0;
+#endif
+}
+
+/* __dealloc_vx_info()
+
+ * final disposal of vx_info */
+
+static void __dealloc_vx_info(struct vx_info *vxi)
+{
+ int cpu;
+
+ vxdprintk(VXD_CBIT(xid, 0),
+ "dealloc_vx_info(%p)", vxi);
+ vxh_dealloc_vx_info(vxi);
+
+ vxi->vx_id = -1;
+
+ vx_info_exit_limit(&vxi->limit);
+ vx_info_exit_sched(&vxi->sched);
+ vx_info_exit_cvirt(&vxi->cvirt);
+ vx_info_exit_cacct(&vxi->cacct);
+
+ for_each_possible_cpu(cpu) {
+ vx_info_exit_sched_pc(
+ &vx_per_cpu(vxi, sched_pc, cpu), cpu);
+ vx_info_exit_cvirt_pc(
+ &vx_per_cpu(vxi, cvirt_pc, cpu), cpu);
+ }
+
+ vxi->vx_state |= VXS_RELEASED;
+
+#ifdef CONFIG_SMP
+ free_percpu(vxi->ptr_pc);
+#endif
+ kfree(vxi);
+ atomic_dec(&vx_global_ctotal);
+}
+
+static void __shutdown_vx_info(struct vx_info *vxi)
+{
+ struct nsproxy *nsproxy;
+ struct fs_struct *fs;
+
+ might_sleep();
+
+ vxi->vx_state |= VXS_SHUTDOWN;
+ vs_state_change(vxi, VSC_SHUTDOWN);
+
+ nsproxy = xchg(&vxi->vx_nsproxy, NULL);
+ fs = xchg(&vxi->vx_fs, NULL);
+
+ if (nsproxy)
+ put_nsproxy(nsproxy);
+ if (fs)
+ put_fs_struct(fs);
+}
+
+/* exported stuff */
+
+void free_vx_info(struct vx_info *vxi)
+{
+ unsigned long flags;
+
+ /* check for reference counts first */
+ BUG_ON(atomic_read(&vxi->vx_usecnt));
+ BUG_ON(atomic_read(&vxi->vx_tasks));
+
+ /* context must not be hashed */
+ BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+ /* context shutdown is mandatory */
+ BUG_ON(!vx_info_state(vxi, VXS_SHUTDOWN));
+
+ BUG_ON(vxi->vx_nsproxy);
+ BUG_ON(vxi->vx_fs);
+
+ spin_lock_irqsave(&vx_info_inactive_lock, flags);
+ hlist_del(&vxi->vx_hlist);
+ spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+
+ __dealloc_vx_info(vxi);
+}
+
+
+/* hash table for vx_info hash */
+
+#define VX_HASH_SIZE 13
+
+static struct hlist_head vx_info_hash[VX_HASH_SIZE] =
+ { [0 ... VX_HASH_SIZE-1] = HLIST_HEAD_INIT };
+
+static spinlock_t vx_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(xid_t xid)
+{
+ return (xid % VX_HASH_SIZE);
+}
+
+
+
+/* __hash_vx_info()
+
+ * add the vxi to the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __hash_vx_info(struct vx_info *vxi)
+{
+ struct hlist_head *head;
+
+ vxd_assert_lock(&vx_info_hash_lock);
+ vxdprintk(VXD_CBIT(xid, 4),
+ "__hash_vx_info: %p[#%d]", vxi, vxi->vx_id);
+ vxh_hash_vx_info(vxi);
+
+ /* context must not be hashed */
+ BUG_ON(vx_info_state(vxi, VXS_HASHED));
+
+ vxi->vx_state |= VXS_HASHED;
+ head = &vx_info_hash[__hashval(vxi->vx_id)];
+ hlist_add_head(&vxi->vx_hlist, head);
+ atomic_inc(&vx_global_cactive);
+}
+
+/* __unhash_vx_info()
+
+ * remove the vxi from the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __unhash_vx_info(struct vx_info *vxi)
+{
+ unsigned long flags;
+
+ vxd_assert_lock(&vx_info_hash_lock);
+ vxdprintk(VXD_CBIT(xid, 4),
+ "__unhash_vx_info: %p[#%d.%d.%d]", vxi, vxi->vx_id,
+ atomic_read(&vxi->vx_usecnt), atomic_read(&vxi->vx_tasks));
+ vxh_unhash_vx_info(vxi);
+
+ /* context must be hashed */
+ BUG_ON(!vx_info_state(vxi, VXS_HASHED));
+ /* but without tasks */
+ BUG_ON(atomic_read(&vxi->vx_tasks));
+
+ vxi->vx_state &= ~VXS_HASHED;
+ hlist_del_init(&vxi->vx_hlist);
+ spin_lock_irqsave(&vx_info_inactive_lock, flags);
+ hlist_add_head(&vxi->vx_hlist, &vx_info_inactive);
+ spin_unlock_irqrestore(&vx_info_inactive_lock, flags);
+ atomic_dec(&vx_global_cactive);
+}
+
+
+/* __lookup_vx_info()
+
+ * requires the hash_lock to be held
+ * doesn't increment the vx_refcnt */
+
+static inline struct vx_info *__lookup_vx_info(xid_t xid)
+{
+ struct hlist_head *head = &vx_info_hash[__hashval(xid)];
+ struct hlist_node *pos;
+ struct vx_info *vxi;
+
+ vxd_assert_lock(&vx_info_hash_lock);
+ hlist_for_each(pos, head) {
+ vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+
+ if (vxi->vx_id == xid)
+ goto found;
+ }
+ vxi = NULL;
+found:
+ vxdprintk(VXD_CBIT(xid, 0),
+ "__lookup_vx_info(#%u): %p[#%u]",
+ xid, vxi, vxi ? vxi->vx_id : 0);
+ vxh_lookup_vx_info(vxi, xid);
+ return vxi;
+}
+
+
+/* __create_vx_info()
+
+ * create the requested context
+ * get(), claim() and hash it */
+
+static struct vx_info *__create_vx_info(int id)
+{
+ struct vx_info *new, *vxi = NULL;
+
+ vxdprintk(VXD_CBIT(xid, 1), "create_vx_info(%d)*", id);
+
+ if (!(new = __alloc_vx_info(id)))
+ return ERR_PTR(-ENOMEM);
+
+ /* required to make dynamic xids unique */
+ spin_lock(&vx_info_hash_lock);
+
+ /* static context requested */
+ if ((vxi = __lookup_vx_info(id))) {
+ vxdprintk(VXD_CBIT(xid, 0),
+ "create_vx_info(%d) = %p (already there)", id, vxi);
+ if (vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+ vxi = ERR_PTR(-EBUSY);
+ else
+ vxi = ERR_PTR(-EEXIST);
+ goto out_unlock;
+ }
+ /* new context */
+ vxdprintk(VXD_CBIT(xid, 0),
+ "create_vx_info(%d) = %p (new)", id, new);
+ claim_vx_info(new, NULL);
+ __hash_vx_info(get_vx_info(new));
+ vxi = new, new = NULL;
+
+out_unlock:
+ spin_unlock(&vx_info_hash_lock);
+ vxh_create_vx_info(IS_ERR(vxi) ? NULL : vxi, id);
+ if (new)
+ __dealloc_vx_info(new);
+ return vxi;
+}
+
+
+/* exported stuff */
+
+
+void unhash_vx_info(struct vx_info *vxi)
+{
+ __shutdown_vx_info(vxi);
+ spin_lock(&vx_info_hash_lock);
+ __unhash_vx_info(vxi);
+ spin_unlock(&vx_info_hash_lock);
+ __wakeup_vx_info(vxi);
+}
+
+
+/* lookup_vx_info()
+
+ * search for a vx_info and get() it
+ * negative id means current */
+
+struct vx_info *lookup_vx_info(int id)
+{
+ struct vx_info *vxi = NULL;
+
+ if (id < 0) {
+ vxi = get_vx_info(current->vx_info);
+ } else if (id > 1) {
+ spin_lock(&vx_info_hash_lock);
+ vxi = get_vx_info(__lookup_vx_info(id));
+ spin_unlock(&vx_info_hash_lock);
+ }
+ return vxi;
+}
+
+/* xid_is_hashed()
+
+ * verify that xid is still hashed */
+
+int xid_is_hashed(xid_t xid)
+{
+ int hashed;
+
+ spin_lock(&vx_info_hash_lock);
+ hashed = (__lookup_vx_info(xid) != NULL);
+ spin_unlock(&vx_info_hash_lock);
+ return hashed;
+}
+
+#ifdef CONFIG_PROC_FS
+
+/* get_xid_list()
+
+ * get a subset of hashed xids for proc
+ * assumes size is at least one */
+
+int get_xid_list(int index, unsigned int *xids, int size)
+{
+ int hindex, nr_xids = 0;
+
+ /* only show current and children */
+ if (!vx_check(0, VS_ADMIN | VS_WATCH)) {
+ if (index > 0)
+ return 0;
+ xids[nr_xids] = vx_current_xid();
+ return 1;
+ }
+
+ for (hindex = 0; hindex < VX_HASH_SIZE; hindex++) {
+ struct hlist_head *head = &vx_info_hash[hindex];
+ struct hlist_node *pos;
+
+ spin_lock(&vx_info_hash_lock);
+ hlist_for_each(pos, head) {
+ struct vx_info *vxi;
+
+ if (--index > 0)
+ continue;
+
+ vxi = hlist_entry(pos, struct vx_info, vx_hlist);
+ xids[nr_xids] = vxi->vx_id;
+ if (++nr_xids >= size) {
+ spin_unlock(&vx_info_hash_lock);
+ goto out;
+ }
+ }
+ /* keep the lock time short */
+ spin_unlock(&vx_info_hash_lock);
+ }
+out:
+ return nr_xids;
+}
+#endif
+
+#ifdef CONFIG_VSERVER_DEBUG
+
+void dump_vx_info_inactive(int level)
+{
+ struct hlist_node *entry, *next;
+
+ hlist_for_each_safe(entry, next, &vx_info_inactive) {
+ struct vx_info *vxi =
+ list_entry(entry, struct vx_info, vx_hlist);
+
+ dump_vx_info(vxi, level);
+ }
+}
+
+#endif
+
+#if 0
+int vx_migrate_user(struct task_struct *p, struct vx_info *vxi)
+{
+ struct user_struct *new_user, *old_user;
+
+ if (!p || !vxi)
+ BUG();
+
+ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+ return -EACCES;
+
+ new_user = alloc_uid(vxi->vx_id, p->uid);
+ if (!new_user)
+ return -ENOMEM;
+
+ old_user = p->user;
+ if (new_user != old_user) {
+ atomic_inc(&new_user->processes);
+ atomic_dec(&old_user->processes);
+ p->user = new_user;
+ }
+ free_uid(old_user);
+ return 0;
+}
+#endif
+
+#if 0
+void vx_mask_cap_bset(struct vx_info *vxi, struct task_struct *p)
+{
+ // p->cap_effective &= vxi->vx_cap_bset;
+ p->cap_effective =
+ cap_intersect(p->cap_effective, vxi->cap_bset);
+ // p->cap_inheritable &= vxi->vx_cap_bset;
+ p->cap_inheritable =
+ cap_intersect(p->cap_inheritable, vxi->cap_bset);
+ // p->cap_permitted &= vxi->vx_cap_bset;
+ p->cap_permitted =
+ cap_intersect(p->cap_permitted, vxi->cap_bset);
+}
+#endif
+
+
+#include <linux/file.h>
+
+static int vx_openfd_task(struct task_struct *tsk)
+{
+ struct files_struct *files = tsk->files;
+ struct fdtable *fdt;
+ const unsigned long *bptr;
+ int count, total;
+
+ /* no rcu_read_lock() because of spin_lock() */
+ spin_lock(&files->file_lock);
+ fdt = files_fdtable(files);
+ bptr = fdt->open_fds->fds_bits;
+ count = fdt->max_fds / (sizeof(unsigned long) * 8);
+ for (total = 0; count > 0; count--) {
+ if (*bptr)
+ total += hweight_long(*bptr);
+ bptr++;
+ }
+ spin_unlock(&files->file_lock);
+ return total;
+}
+
+
+/* for *space compatibility */
+
+asmlinkage long sys_unshare(unsigned long);
+
+/*
+ * migrate task to new context
+ * gets vxi, puts old_vxi on change
+ * optionally unshares namespaces (hack)
+ */
+
+int vx_migrate_task(struct task_struct *p, struct vx_info *vxi, int unshare)
+{
+ struct vx_info *old_vxi;
+ int ret = 0;
+
+ if (!p || !vxi)
+ BUG();
+
+ vxdprintk(VXD_CBIT(xid, 5),
+ "vx_migrate_task(%p,%p[#%d.%d])", p, vxi,
+ vxi->vx_id, atomic_read(&vxi->vx_usecnt));
+
+ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0) &&
+ !vx_info_flags(vxi, VXF_STATE_SETUP, 0))
+ return -EACCES;
+
+ if (vx_info_state(vxi, VXS_SHUTDOWN))
+ return -EFAULT;
+
+ old_vxi = task_get_vx_info(p);
+ if (old_vxi == vxi)
+ goto out;
+
+// if (!(ret = vx_migrate_user(p, vxi))) {
+ {
+ int openfd;
+
+ task_lock(p);
+ openfd = vx_openfd_task(p);
+
+ if (old_vxi) {
+ atomic_dec(&old_vxi->cvirt.nr_threads);
+ atomic_dec(&old_vxi->cvirt.nr_running);
+ __rlim_dec(&old_vxi->limit, RLIMIT_NPROC);
+ /* FIXME: what about the struct files here? */
+ __rlim_sub(&old_vxi->limit, VLIMIT_OPENFD, openfd);
+ /* account for the executable */
+ __rlim_dec(&old_vxi->limit, VLIMIT_DENTRY);
+ }
+ atomic_inc(&vxi->cvirt.nr_threads);
+ atomic_inc(&vxi->cvirt.nr_running);
+ __rlim_inc(&vxi->limit, RLIMIT_NPROC);
+ /* FIXME: what about the struct files here? */
+ __rlim_add(&vxi->limit, VLIMIT_OPENFD, openfd);
+ /* account for the executable */
+ __rlim_inc(&vxi->limit, VLIMIT_DENTRY);
+
+ if (old_vxi) {
+ release_vx_info(old_vxi, p);
+ clr_vx_info(&p->vx_info);
+ }
+ claim_vx_info(vxi, p);
+ set_vx_info(&p->vx_info, vxi);
+ p->xid = vxi->vx_id;
+
+ vxdprintk(VXD_CBIT(xid, 5),
+ "moved task %p into vxi:%p[#%d]",
+ p, vxi, vxi->vx_id);
+
+ // vx_mask_cap_bset(vxi, p);
+ task_unlock(p);
+
+ /* hack for *spaces to provide compatibility */
+ if (unshare) {
+ struct nsproxy *old_nsp, *new_nsp;
+
+ ret = unshare_nsproxy_namespaces(
+ CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER,
+ &new_nsp, NULL);
+ if (ret)
+ goto out;
+
+ old_nsp = xchg(&p->nsproxy, new_nsp);
+ vx_set_space(vxi, CLONE_NEWUTS | CLONE_NEWIPC | CLONE_NEWUSER);
+ put_nsproxy(old_nsp);
+ }
+ }
+out:
+ put_vx_info(old_vxi);
+ return ret;
+}
+
+int vx_set_reaper(struct vx_info *vxi, struct task_struct *p)
+{
+ struct task_struct *old_reaper;
+
+ if (!vxi)
+ return -EINVAL;
+
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_set_reaper(%p[#%d],%p[#%d,%d])",
+ vxi, vxi->vx_id, p, p->xid, p->pid);
+
+ old_reaper = vxi->vx_reaper;
+ if (old_reaper == p)
+ return 0;
+
+ /* set new child reaper */
+ get_task_struct(p);
+ vxi->vx_reaper = p;
+ put_task_struct(old_reaper);
+ return 0;
+}
+
+int vx_set_init(struct vx_info *vxi, struct task_struct *p)
+{
+ if (!vxi)
+ return -EINVAL;
+
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_set_init(%p[#%d],%p[#%d,%d,%d])",
+ vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+ vxi->vx_flags &= ~VXF_STATE_INIT;
+ vxi->vx_initpid = p->tgid;
+ return 0;
+}
+
+void vx_exit_init(struct vx_info *vxi, struct task_struct *p, int code)
+{
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_exit_init(%p[#%d],%p[#%d,%d,%d])",
+ vxi, vxi->vx_id, p, p->xid, p->pid, p->tgid);
+
+ vxi->exit_code = code;
+ vxi->vx_initpid = 0;
+}
+
+
+void vx_set_persistent(struct vx_info *vxi)
+{
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_set_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+ get_vx_info(vxi);
+ claim_vx_info(vxi, NULL);
+}
+
+void vx_clear_persistent(struct vx_info *vxi)
+{
+ vxdprintk(VXD_CBIT(xid, 6),
+ "vx_clear_persistent(%p[#%d])", vxi, vxi->vx_id);
+
+ release_vx_info(vxi, NULL);
+ put_vx_info(vxi);
+}
+
+void vx_update_persistent(struct vx_info *vxi)
+{
+ if (vx_info_flags(vxi, VXF_PERSISTENT, 0))
+ vx_set_persistent(vxi);
+ else
+ vx_clear_persistent(vxi);
+}
+
+
+/* task must be current or locked */
+
+void exit_vx_info(struct task_struct *p, int code)
+{
+ struct vx_info *vxi = p->vx_info;
+
+ if (vxi) {
+ atomic_dec(&vxi->cvirt.nr_threads);
+ vx_nproc_dec(p);
+
+ vxi->exit_code = code;
+ release_vx_info(vxi, p);
+ }
+}
+
+void exit_vx_info_early(struct task_struct *p, int code)
+{
+ struct vx_info *vxi = p->vx_info;
+
+ if (vxi) {
+ if (vxi->vx_initpid == p->tgid)
+ vx_exit_init(vxi, p, code);
+ if (vxi->vx_reaper == p)
+ vx_set_reaper(vxi, init_pid_ns.child_reaper);
+ }
+}
+
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_xid(uint32_t id)
+{
+ xid_t xid;
+
+ if (id) {
+ struct task_struct *tsk;
+
+ read_lock(&tasklist_lock);
+ tsk = find_task_by_real_pid(id);
+ xid = (tsk) ? tsk->xid : -ESRCH;
+ read_unlock(&tasklist_lock);
+ } else
+ xid = vx_current_xid();
+ return xid;
+}
+
+
+int vc_vx_info(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_vx_info_v0 vc_data;
+
+ vc_data.xid = vxi->vx_id;
+ vc_data.initpid = vxi->vx_initpid;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+int vc_ctx_stat(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_stat_v0 vc_data;
+
+ vc_data.usecnt = atomic_read(&vxi->vx_usecnt);
+ vc_data.tasks = atomic_read(&vxi->vx_tasks);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+/* context functions */
+
+int vc_ctx_create(uint32_t xid, void __user *data)
+{
+ struct vcmd_ctx_create vc_data = { .flagword = VXF_INIT_SET };
+ struct vx_info *new_vxi;
+ int ret;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ if ((xid > MAX_S_CONTEXT) || (xid < 2))
+ return -EINVAL;
+
+ new_vxi = __create_vx_info(xid);
+ if (IS_ERR(new_vxi))
+ return PTR_ERR(new_vxi);
+
+ /* initial flags */
+ new_vxi->vx_flags = vc_data.flagword;
+
+ ret = -ENOEXEC;
+ if (vs_state_change(new_vxi, VSC_STARTUP))
+ goto out;
+
+ ret = vx_migrate_task(current, new_vxi, (!data));
+ if (ret)
+ goto out;
+
+ /* return context id on success */
+ ret = new_vxi->vx_id;
+
+ /* get a reference for persistent contexts */
+ if ((vc_data.flagword & VXF_PERSISTENT))
+ vx_set_persistent(new_vxi);
+out:
+ release_vx_info(new_vxi, NULL);
+ put_vx_info(new_vxi);
+ return ret;
+}
+
+
+int vc_ctx_migrate(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_migrate vc_data = { .flagword = 0 };
+ int ret;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = vx_migrate_task(current, vxi, 0);
+ if (ret)
+ return ret;
+ if (vc_data.flagword & VXM_SET_INIT)
+ ret = vx_set_init(vxi, current);
+ if (ret)
+ return ret;
+ if (vc_data.flagword & VXM_SET_REAPER)
+ ret = vx_set_reaper(vxi, current);
+ return ret;
+}
+
+
+int vc_get_cflags(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_flags_v0 vc_data;
+
+ vc_data.flagword = vxi->vx_flags;
+
+ /* special STATE flag handling */
+ vc_data.mask = vs_mask_flags(~0ULL, vxi->vx_flags, VXF_ONE_TIME);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+int vc_set_cflags(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_flags_v0 vc_data;
+ uint64_t mask, trigger;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ /* special STATE flag handling */
+ mask = vs_mask_mask(vc_data.mask, vxi->vx_flags, VXF_ONE_TIME);
+ trigger = (mask & vxi->vx_flags) ^ (mask & vc_data.flagword);
+
+ if (vxi == current->vx_info) {
+ /* if (trigger & VXF_STATE_SETUP)
+ vx_mask_cap_bset(vxi, current); */
+ if (trigger & VXF_STATE_INIT) {
+ int ret;
+
+ ret = vx_set_init(vxi, current);
+ if (ret)
+ return ret;
+ ret = vx_set_reaper(vxi, current);
+ if (ret)
+ return ret;
+ }
+ }
+
+ vxi->vx_flags = vs_mask_flags(vxi->vx_flags,
+ vc_data.flagword, mask);
+ if (trigger & VXF_PERSISTENT)
+ vx_update_persistent(vxi);
+
+ return 0;
+}
+
+
+static inline uint64_t caps_from_cap_t(kernel_cap_t c)
+{
+ uint64_t v = c.cap[0] | ((uint64_t)c.cap[1] << 32);
+
+ // printk("caps_from_cap_t(%08x:%08x) = %016llx\n", c.cap[1], c.cap[0], v);
+ return v;
+}
+
+static inline kernel_cap_t cap_t_from_caps(uint64_t v)
+{
+ kernel_cap_t c = __cap_empty_set;
+
+ c.cap[0] = v & 0xFFFFFFFF;
+ c.cap[1] = (v >> 32) & 0xFFFFFFFF;
+
+ // printk("cap_t_from_caps(%016llx) = %08x:%08x\n", v, c.cap[1], c.cap[0]);
+ return c;
+}
+
+
+static int do_get_caps(struct vx_info *vxi, uint64_t *bcaps, uint64_t *ccaps)
+{
+ if (bcaps)
+ *bcaps = caps_from_cap_t(vxi->vx_bcaps);
+ if (ccaps)
+ *ccaps = vxi->vx_ccaps;
+
+ return 0;
+}
+
+int vc_get_ccaps(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_caps_v1 vc_data;
+ int ret;
+
+ ret = do_get_caps(vxi, NULL, &vc_data.ccaps);
+ if (ret)
+ return ret;
+ vc_data.cmask = ~0ULL;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+static int do_set_caps(struct vx_info *vxi,
+ uint64_t bcaps, uint64_t bmask, uint64_t ccaps, uint64_t cmask)
+{
+ uint64_t bcold = caps_from_cap_t(vxi->vx_bcaps);
+
+#if 0
+ printk("do_set_caps(%16llx, %16llx, %16llx, %16llx)\n",
+ bcaps, bmask, ccaps, cmask);
+#endif
+ vxi->vx_bcaps = cap_t_from_caps(
+ vs_mask_flags(bcold, bcaps, bmask));
+ vxi->vx_ccaps = vs_mask_flags(vxi->vx_ccaps, ccaps, cmask);
+
+ return 0;
+}
+
+int vc_set_ccaps(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_caps_v1 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_caps(vxi, 0, 0, vc_data.ccaps, vc_data.cmask);
+}
+
+int vc_get_bcaps(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_bcaps vc_data;
+ int ret;
+
+ ret = do_get_caps(vxi, &vc_data.bcaps, NULL);
+ if (ret)
+ return ret;
+ vc_data.bmask = ~0ULL;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+int vc_set_bcaps(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_bcaps vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_caps(vxi, vc_data.bcaps, vc_data.bmask, 0, 0);
+}
+
+
+int vc_get_badness(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_badness_v0 vc_data;
+
+ vc_data.bias = vxi->vx_badness_bias;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+int vc_set_badness(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_badness_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ vxi->vx_badness_bias = vc_data.bias;
+ return 0;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(free_vx_info);
+
--- a/kernel/vserver/cvirt.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cvirt.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,301 @@
+/*
+ * linux/kernel/vserver/cvirt.c
+ *
+ * Virtual Server: Context Virtualization
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 broken out from limit.c
+ * V0.02 added utsname stuff
+ * V0.03 changed vcmds to vxi arg
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/utsname.h>
+#include <linux/vs_cvirt.h>
+#include <linux/vserver/switch.h>
+#include <linux/vserver/cvirt_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+void vx_vsi_uptime(struct timespec *uptime, struct timespec *idle)
+{
+ struct vx_info *vxi = current->vx_info;
+
+ set_normalized_timespec(uptime,
+ uptime->tv_sec - vxi->cvirt.bias_uptime.tv_sec,
+ uptime->tv_nsec - vxi->cvirt.bias_uptime.tv_nsec);
+ if (!idle)
+ return;
+ set_normalized_timespec(idle,
+ idle->tv_sec - vxi->cvirt.bias_idle.tv_sec,
+ idle->tv_nsec - vxi->cvirt.bias_idle.tv_nsec);
+ return;
+}
+
+uint64_t vx_idle_jiffies(void)
+{
+ return init_task.utime + init_task.stime;
+}
+
+
+
+static inline uint32_t __update_loadavg(uint32_t load,
+ int wsize, int delta, int n)
+{
+ unsigned long long calc, prev;
+
+ /* just set it to n */
+ if (unlikely(delta >= wsize))
+ return (n << FSHIFT);
+
+ calc = delta * n;
+ calc <<= FSHIFT;
+ prev = (wsize - delta);
+ prev *= load;
+ calc += prev;
+ do_div(calc, wsize);
+ return calc;
+}
+
+
+void vx_update_load(struct vx_info *vxi)
+{
+ uint32_t now, last, delta;
+ unsigned int nr_running, nr_uninterruptible;
+ unsigned int total;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vxi->cvirt.load_lock, flags);
+
+ now = jiffies;
+ last = vxi->cvirt.load_last;
+ delta = now - last;
+
+ if (delta < 5*HZ)
+ goto out;
+
+ nr_running = atomic_read(&vxi->cvirt.nr_running);
+ nr_uninterruptible = atomic_read(&vxi->cvirt.nr_uninterruptible);
+ total = nr_running + nr_uninterruptible;
+
+ vxi->cvirt.load[0] = __update_loadavg(vxi->cvirt.load[0],
+ 60*HZ, delta, total);
+ vxi->cvirt.load[1] = __update_loadavg(vxi->cvirt.load[1],
+ 5*60*HZ, delta, total);
+ vxi->cvirt.load[2] = __update_loadavg(vxi->cvirt.load[2],
+ 15*60*HZ, delta, total);
+
+ vxi->cvirt.load_last = now;
+out:
+ atomic_inc(&vxi->cvirt.load_updates);
+ spin_unlock_irqrestore(&vxi->cvirt.load_lock, flags);
+}
+
+
+/*
+ * Commands to do_syslog:
+ *
+ * 0 -- Close the log. Currently a NOP.
+ * 1 -- Open the log. Currently a NOP.
+ * 2 -- Read from the log.
+ * 3 -- Read all messages remaining in the ring buffer.
+ * 4 -- Read and clear all messages remaining in the ring buffer
+ * 5 -- Clear ring buffer.
+ * 6 -- Disable printk's to console
+ * 7 -- Enable printk's to console
+ * 8 -- Set level of messages printed to console
+ * 9 -- Return number of unread characters in the log buffer
+ * 10 -- Return size of the log buffer
+ */
+int vx_do_syslog(int type, char __user *buf, int len)
+{
+ int error = 0;
+ int do_clear = 0;
+ struct vx_info *vxi = current->vx_info;
+ struct _vx_syslog *log;
+
+ if (!vxi)
+ return -EINVAL;
+ log = &vxi->cvirt.syslog;
+
+ switch (type) {
+ case 0: /* Close log */
+ case 1: /* Open log */
+ break;
+ case 2: /* Read from log */
+ error = wait_event_interruptible(log->log_wait,
+ (log->log_start - log->log_end));
+ if (error)
+ break;
+ spin_lock_irq(&log->logbuf_lock);
+ spin_unlock_irq(&log->logbuf_lock);
+ break;
+ case 4: /* Read/clear last kernel messages */
+ do_clear = 1;
+ /* fall through */
+ case 3: /* Read last kernel messages */
+ return 0;
+
+ case 5: /* Clear ring buffer */
+ return 0;
+
+ case 6: /* Disable logging to console */
+ case 7: /* Enable logging to console */
+ case 8: /* Set level of messages printed to console */
+ break;
+
+ case 9: /* Number of chars in the log buffer */
+ return 0;
+ case 10: /* Size of the log buffer */
+ return 0;
+ default:
+ error = -EINVAL;
+ break;
+ }
+ return error;
+}
+
+
+/* virtual host info names */
+
+static char *vx_vhi_name(struct vx_info *vxi, int id)
+{
+ struct nsproxy *nsproxy;
+ struct uts_namespace *uts;
+
+
+ if (id == VHIN_CONTEXT)
+ return vxi->vx_name;
+
+ nsproxy = vxi->vx_nsproxy;
+ if (!nsproxy)
+ return NULL;
+
+ uts = nsproxy->uts_ns;
+ if (!uts)
+ return NULL;
+
+ switch (id) {
+ case VHIN_SYSNAME:
+ return uts->name.sysname;
+ case VHIN_NODENAME:
+ return uts->name.nodename;
+ case VHIN_RELEASE:
+ return uts->name.release;
+ case VHIN_VERSION:
+ return uts->name.version;
+ case VHIN_MACHINE:
+ return uts->name.machine;
+ case VHIN_DOMAINNAME:
+ return uts->name.domainname;
+ default:
+ return NULL;
+ }
+ return NULL;
+}
+
+int vc_set_vhi_name(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_vhi_name_v0 vc_data;
+ char *name;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ name = vx_vhi_name(vxi, vc_data.field);
+ if (!name)
+ return -EINVAL;
+
+ memcpy(name, vc_data.name, 65);
+ return 0;
+}
+
+int vc_get_vhi_name(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_vhi_name_v0 vc_data;
+ char *name;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ name = vx_vhi_name(vxi, vc_data.field);
+ if (!name)
+ return -EINVAL;
+
+ memcpy(vc_data.name, name, 65);
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+int vc_virt_stat(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_virt_stat_v0 vc_data;
+ struct _vx_cvirt *cvirt = &vxi->cvirt;
+ struct timespec uptime;
+
+ do_posix_clock_monotonic_gettime(&uptime);
+ set_normalized_timespec(&uptime,
+ uptime.tv_sec - cvirt->bias_uptime.tv_sec,
+ uptime.tv_nsec - cvirt->bias_uptime.tv_nsec);
+
+ vc_data.offset = timeval_to_ns(&cvirt->bias_tv);
+ vc_data.uptime = timespec_to_ns(&uptime);
+ vc_data.nr_threads = atomic_read(&cvirt->nr_threads);
+ vc_data.nr_running = atomic_read(&cvirt->nr_running);
+ vc_data.nr_uninterruptible = atomic_read(&cvirt->nr_uninterruptible);
+ vc_data.nr_onhold = atomic_read(&cvirt->nr_onhold);
+ vc_data.nr_forks = atomic_read(&cvirt->total_forks);
+ vc_data.load[0] = cvirt->load[0];
+ vc_data.load[1] = cvirt->load[1];
+ vc_data.load[2] = cvirt->load[2];
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+#ifdef CONFIG_VSERVER_VTIME
+
+/* virtualized time base */
+
+void vx_gettimeofday(struct timeval *tv)
+{
+ do_gettimeofday(tv);
+ if (!vx_flags(VXF_VIRT_TIME, 0))
+ return;
+
+ tv->tv_sec += current->vx_info->cvirt.bias_tv.tv_sec;
+ tv->tv_usec += current->vx_info->cvirt.bias_tv.tv_usec;
+
+ if (tv->tv_usec >= USEC_PER_SEC) {
+ tv->tv_sec++;
+ tv->tv_usec -= USEC_PER_SEC;
+ } else if (tv->tv_usec < 0) {
+ tv->tv_sec--;
+ tv->tv_usec += USEC_PER_SEC;
+ }
+}
+
+int vx_settimeofday(struct timespec *ts)
+{
+ struct timeval tv;
+
+ if (!vx_flags(VXF_VIRT_TIME, 0))
+ return do_settimeofday(ts);
+
+ do_gettimeofday(&tv);
+ current->vx_info->cvirt.bias_tv.tv_sec =
+ ts->tv_sec - tv.tv_sec;
+ current->vx_info->cvirt.bias_tv.tv_usec =
+ (ts->tv_nsec/NSEC_PER_USEC) - tv.tv_usec;
+ return 0;
+}
+
+#endif
+
--- a/kernel/vserver/cvirt_init.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cvirt_init.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,69 @@
+
+
+extern uint64_t vx_idle_jiffies(void);
+
+static inline void vx_info_init_cvirt(struct _vx_cvirt *cvirt)
+{
+ uint64_t idle_jiffies = vx_idle_jiffies();
+ uint64_t nsuptime;
+
+ do_posix_clock_monotonic_gettime(&cvirt->bias_uptime);
+ nsuptime = (unsigned long long)cvirt->bias_uptime.tv_sec
+ * NSEC_PER_SEC + cvirt->bias_uptime.tv_nsec;
+ cvirt->bias_clock = nsec_to_clock_t(nsuptime);
+ cvirt->bias_tv.tv_sec = 0;
+ cvirt->bias_tv.tv_usec = 0;
+
+ jiffies_to_timespec(idle_jiffies, &cvirt->bias_idle);
+ atomic_set(&cvirt->nr_threads, 0);
+ atomic_set(&cvirt->nr_running, 0);
+ atomic_set(&cvirt->nr_uninterruptible, 0);
+ atomic_set(&cvirt->nr_onhold, 0);
+
+ spin_lock_init(&cvirt->load_lock);
+ cvirt->load_last = jiffies;
+ atomic_set(&cvirt->load_updates, 0);
+ cvirt->load[0] = 0;
+ cvirt->load[1] = 0;
+ cvirt->load[2] = 0;
+ atomic_set(&cvirt->total_forks, 0);
+
+ spin_lock_init(&cvirt->syslog.logbuf_lock);
+ init_waitqueue_head(&cvirt->syslog.log_wait);
+ cvirt->syslog.log_start = 0;
+ cvirt->syslog.log_end = 0;
+ cvirt->syslog.con_start = 0;
+ cvirt->syslog.logged_chars = 0;
+}
+
+static inline
+void vx_info_init_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+ // cvirt_pc->cpustat = { 0 };
+}
+
+static inline void vx_info_exit_cvirt(struct _vx_cvirt *cvirt)
+{
+ int value;
+
+ vxwprintk_xid((value = atomic_read(&cvirt->nr_threads)),
+ "!!! cvirt: %p[nr_threads] = %d on exit.",
+ cvirt, value);
+ vxwprintk_xid((value = atomic_read(&cvirt->nr_running)),
+ "!!! cvirt: %p[nr_running] = %d on exit.",
+ cvirt, value);
+ vxwprintk_xid((value = atomic_read(&cvirt->nr_uninterruptible)),
+ "!!! cvirt: %p[nr_uninterruptible] = %d on exit.",
+ cvirt, value);
+ vxwprintk_xid((value = atomic_read(&cvirt->nr_onhold)),
+ "!!! cvirt: %p[nr_onhold] = %d on exit.",
+ cvirt, value);
+ return;
+}
+
+static inline
+void vx_info_exit_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc, int cpu)
+{
+ return;
+}
+
--- a/kernel/vserver/cvirt_proc.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/cvirt_proc.h 2008-04-21 13:01:29.000000000 -0400
@@ -0,0 +1,135 @@
+#ifndef _VX_CVIRT_PROC_H
+#define _VX_CVIRT_PROC_H
+
+#include <linux/nsproxy.h>
+#include <linux/mnt_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <linux/utsname.h>
+#include <linux/ipc.h>
+
+
+static inline
+int vx_info_proc_nsproxy(struct nsproxy *nsproxy, char *buffer)
+{
+ struct mnt_namespace *ns;
+ struct uts_namespace *uts;
+ struct ipc_namespace *ipc;
+ struct path path;
+ char *pstr, *root;
+ int length = 0;
+
+ if (!nsproxy)
+ goto out;
+
+ length += sprintf(buffer + length,
+ "NSProxy:\t%p [%p,%p,%p]\n",
+ nsproxy, nsproxy->mnt_ns,
+ nsproxy->uts_ns, nsproxy->ipc_ns);
+
+ ns = nsproxy->mnt_ns;
+ if (!ns)
+ goto skip_ns;
+
+ pstr = kmalloc(PATH_MAX, GFP_KERNEL);
+ if (!pstr)
+ goto skip_ns;
+
+ path.mnt = ns->root;
+ path.dentry = ns->root->mnt_root;
+ root = d_path(&path, pstr, PATH_MAX - 2);
+ length += sprintf(buffer + length,
+ "Namespace:\t%p [#%u]\n"
+ "RootPath:\t%s\n",
+ ns, atomic_read(&ns->count),
+ root);
+ kfree(pstr);
+skip_ns:
+
+ uts = nsproxy->uts_ns;
+ if (!uts)
+ goto skip_uts;
+
+ length += sprintf(buffer + length,
+ "SysName:\t%.*s\n"
+ "NodeName:\t%.*s\n"
+ "Release:\t%.*s\n"
+ "Version:\t%.*s\n"
+ "Machine:\t%.*s\n"
+ "DomainName:\t%.*s\n",
+ __NEW_UTS_LEN, uts->name.sysname,
+ __NEW_UTS_LEN, uts->name.nodename,
+ __NEW_UTS_LEN, uts->name.release,
+ __NEW_UTS_LEN, uts->name.version,
+ __NEW_UTS_LEN, uts->name.machine,
+ __NEW_UTS_LEN, uts->name.domainname);
+skip_uts:
+
+ ipc = nsproxy->ipc_ns;
+ if (!ipc)
+ goto skip_ipc;
+
+ length += sprintf(buffer + length,
+ "SEMS:\t\t%d %d %d %d %d\n"
+ "MSG:\t\t%d %d %d\n"
+ "SHM:\t\t%lu %lu %d %d\n",
+ ipc->sem_ctls[0], ipc->sem_ctls[1],
+ ipc->sem_ctls[2], ipc->sem_ctls[3],
+ ipc->used_sems,
+ ipc->msg_ctlmax, ipc->msg_ctlmnb, ipc->msg_ctlmni,
+ (unsigned long)ipc->shm_ctlmax,
+ (unsigned long)ipc->shm_ctlall,
+ ipc->shm_ctlmni, ipc->shm_tot);
+skip_ipc:
+out:
+ return length;
+}
+
+
+#include <linux/sched.h>
+
+#define LOAD_INT(x) ((x) >> FSHIFT)
+#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1 - 1)) * 100)
+
+static inline
+int vx_info_proc_cvirt(struct _vx_cvirt *cvirt, char *buffer)
+{
+ int length = 0;
+ int a, b, c;
+
+ length += sprintf(buffer + length,
+ "BiasUptime:\t%lu.%02lu\n",
+ (unsigned long)cvirt->bias_uptime.tv_sec,
+ (cvirt->bias_uptime.tv_nsec / (NSEC_PER_SEC / 100)));
+
+ a = cvirt->load[0] + (FIXED_1 / 200);
+ b = cvirt->load[1] + (FIXED_1 / 200);
+ c = cvirt->load[2] + (FIXED_1 / 200);
+ length += sprintf(buffer + length,
+ "nr_threads:\t%d\n"
+ "nr_running:\t%d\n"
+ "nr_unintr:\t%d\n"
+ "nr_onhold:\t%d\n"
+ "load_updates:\t%d\n"
+ "loadavg:\t%d.%02d %d.%02d %d.%02d\n"
+ "total_forks:\t%d\n",
+ atomic_read(&cvirt->nr_threads),
+ atomic_read(&cvirt->nr_running),
+ atomic_read(&cvirt->nr_uninterruptible),
+ atomic_read(&cvirt->nr_onhold),
+ atomic_read(&cvirt->load_updates),
+ LOAD_INT(a), LOAD_FRAC(a),
+ LOAD_INT(b), LOAD_FRAC(b),
+ LOAD_INT(c), LOAD_FRAC(c),
+ atomic_read(&cvirt->total_forks));
+ return length;
+}
+
+static inline
+int vx_info_proc_cvirt_pc(struct _vx_cvirt_pc *cvirt_pc,
+ char *buffer, int cpu)
+{
+ int length = 0;
+ return length;
+}
+
+#endif /* _VX_CVIRT_PROC_H */
--- a/kernel/vserver/debug.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/debug.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,32 @@
+/*
+ * kernel/vserver/debug.c
+ *
+ * Copyright (C) 2005-2007 Herbert Pötzl
+ *
+ * V0.01 vx_info dump support
+ *
+ */
+
+#include <linux/module.h>
+
+#include <linux/vserver/context.h>
+
+
+void dump_vx_info(struct vx_info *vxi, int level)
+{
+ printk("vx_info %p[#%d, %d.%d, %4x]\n", vxi, vxi->vx_id,
+ atomic_read(&vxi->vx_usecnt),
+ atomic_read(&vxi->vx_tasks),
+ vxi->vx_state);
+ if (level > 0) {
+ __dump_vx_limit(&vxi->limit);
+ __dump_vx_sched(&vxi->sched);
+ __dump_vx_cvirt(&vxi->cvirt);
+ __dump_vx_cacct(&vxi->cacct);
+ }
+ printk("---\n");
+}
+
+
+EXPORT_SYMBOL_GPL(dump_vx_info);
+
--- a/kernel/vserver/device.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/device.c 2008-04-21 12:35:24.000000000 -0400
@@ -0,0 +1,443 @@
+/*
+ * linux/kernel/vserver/device.c
+ *
+ * Linux-VServer: Device Support
+ *
+ * Copyright (C) 2006 Herbert Pötzl
+ * Copyright (C) 2007 Daniel Hokka Zakrisson
+ *
+ * V0.01 device mapping basics
+ * V0.02 added defaults
+ *
+ */
+
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/hash.h>
+
+#include <asm/errno.h>
+#include <asm/uaccess.h>
+#include <linux/vserver/base.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/context.h>
+#include <linux/vserver/device.h>
+#include <linux/vserver/device_cmd.h>
+
+
+#define DMAP_HASH_BITS 4
+
+
+struct vs_mapping {
+ union {
+ struct hlist_node hlist;
+ struct list_head list;
+ } u;
+#define dm_hlist u.hlist
+#define dm_list u.list
+ xid_t xid;
+ dev_t device;
+ struct vx_dmap_target target;
+};
+
+
+static struct hlist_head dmap_main_hash[1 << DMAP_HASH_BITS];
+
+static spinlock_t dmap_main_hash_lock = SPIN_LOCK_UNLOCKED;
+
+static struct vx_dmap_target dmap_defaults[2] = {
+ { .flags = DATTR_OPEN },
+ { .flags = DATTR_OPEN },
+};
+
+
+struct kmem_cache *dmap_cachep __read_mostly;
+
+int __init dmap_cache_init(void)
+{
+ dmap_cachep = kmem_cache_create("dmap_cache",
+ sizeof(struct vs_mapping), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ return 0;
+}
+
+__initcall(dmap_cache_init);
+
+
+static inline unsigned int __hashval(dev_t dev, int bits)
+{
+ return hash_long((unsigned long)dev, bits);
+}
+
+
+/* __hash_mapping()
+ * add the mapping to the hash table
+ */
+static inline void __hash_mapping(struct vx_info *vxi, struct vs_mapping *vdm)
+{
+ spinlock_t *hash_lock = &dmap_main_hash_lock;
+ struct hlist_head *head, *hash = dmap_main_hash;
+ int device = vdm->device;
+
+ spin_lock(hash_lock);
+ vxdprintk(VXD_CBIT(misc, 8), "__hash_mapping: %p[#%d] %08x:%08x",
+ vxi, vxi ? vxi->vx_id : 0, device, vdm->target.target);
+
+ head = &hash[__hashval(device, DMAP_HASH_BITS)];
+ hlist_add_head(&vdm->dm_hlist, head);
+ spin_unlock(hash_lock);
+}
+
+
+static inline int __mode_to_default(umode_t mode)
+{
+ switch (mode) {
+ case S_IFBLK:
+ return 0;
+ case S_IFCHR:
+ return 1;
+ default:
+ BUG();
+ }
+}
+
+
+/* __set_default()
+ * set a default
+ */
+static inline void __set_default(struct vx_info *vxi, umode_t mode,
+ struct vx_dmap_target *vdmt)
+{
+ spinlock_t *hash_lock = &dmap_main_hash_lock;
+ spin_lock(hash_lock);
+
+ if (vxi)
+ vxi->dmap.targets[__mode_to_default(mode)] = *vdmt;
+ else
+ dmap_defaults[__mode_to_default(mode)] = *vdmt;
+
+
+ spin_unlock(hash_lock);
+
+ vxdprintk(VXD_CBIT(misc, 8), "__set_default: %p[#%u] %08x %04x",
+ vxi, vxi ? vxi->vx_id : 0, vdmt->target, vdmt->flags);
+}
+
+
+/* __remove_default()
+ * remove a default
+ */
+static inline int __remove_default(struct vx_info *vxi, umode_t mode)
+{
+ spinlock_t *hash_lock = &dmap_main_hash_lock;
+ spin_lock(hash_lock);
+
+ if (vxi)
+ vxi->dmap.targets[__mode_to_default(mode)].flags = 0;
+ else /* remove == reset */
+ dmap_defaults[__mode_to_default(mode)].flags = DATTR_OPEN | mode;
+
+ spin_unlock(hash_lock);
+ return 0;
+}
+
+
+/* __find_mapping()
+ * find a mapping in the hash table
+ *
+ * caller must hold hash_lock
+ */
+static inline int __find_mapping(xid_t xid, dev_t device, umode_t mode,
+ struct vs_mapping **local, struct vs_mapping **global)
+{
+ struct hlist_head *hash = dmap_main_hash;
+ struct hlist_head *head = &hash[__hashval(device, DMAP_HASH_BITS)];
+ struct hlist_node *pos;
+ struct vs_mapping *vdm;
+
+ *local = NULL;
+ if (global)
+ *global = NULL;
+
+ hlist_for_each(pos, head) {
+ vdm = hlist_entry(pos, struct vs_mapping, dm_hlist);
+
+ if ((vdm->device == device) &&
+ !((vdm->target.flags ^ mode) & S_IFMT)) {
+ if (vdm->xid == xid) {
+ *local = vdm;
+ return 1;
+ } else if (global && vdm->xid == 0)
+ *global = vdm;
+ }
+ }
+
+ if (global && *global)
+ return 0;
+ else
+ return -ENOENT;
+}
+
+
+/* __lookup_mapping()
+ * find a mapping and store the result in target and flags
+ */
+static inline int __lookup_mapping(struct vx_info *vxi,
+ dev_t device, dev_t *target, int *flags, umode_t mode)
+{
+ spinlock_t *hash_lock = &dmap_main_hash_lock;
+ struct vs_mapping *vdm, *global;
+ struct vx_dmap_target *vdmt;
+ int ret = 0;
+ xid_t xid = vxi->vx_id;
+ int index;
+
+ spin_lock(hash_lock);
+ if (__find_mapping(xid, device, mode, &vdm, &global) > 0) {
+ ret = 1;
+ vdmt = &vdm->target;
+ goto found;
+ }
+
+ index = __mode_to_default(mode);
+ if (vxi && vxi->dmap.targets[index].flags) {
+ ret = 2;
+ vdmt = &vxi->dmap.targets[index];
+ } else if (global) {
+ ret = 3;
+ vdmt = &global->target;
+ goto found;
+ } else {
+ ret = 4;
+ vdmt = &dmap_defaults[index];
+ }
+
+found:
+ if (target && (vdmt->flags & DATTR_REMAP))
+ *target = vdmt->target;
+ else if (target)
+ *target = device;
+ if (flags)
+ *flags = vdmt->flags;
+
+ spin_unlock(hash_lock);
+
+ return ret;
+}
+
+
+/* __remove_mapping()
+ * remove a mapping from the hash table
+ */
+static inline int __remove_mapping(struct vx_info *vxi, dev_t device,
+ umode_t mode)
+{
+ spinlock_t *hash_lock = &dmap_main_hash_lock;
+ struct vs_mapping *vdm = NULL;
+ int ret = 0;
+
+ spin_lock(hash_lock);
+
+ ret = __find_mapping((vxi ? vxi->vx_id : 0), device, mode, &vdm,
+ NULL);
+ vxdprintk(VXD_CBIT(misc, 8), "__remove_mapping: %p[#%d] %08x %04x",
+ vxi, vxi ? vxi->vx_id : 0, device, mode);
+ if (ret < 0)
+ goto out;
+ hlist_del(&vdm->dm_hlist);
+
+out:
+ spin_unlock(hash_lock);
+ if (vdm)
+ kmem_cache_free(dmap_cachep, vdm);
+ return ret;
+}
+
+
+
+int vs_map_device(struct vx_info *vxi,
+ dev_t device, dev_t *target, umode_t mode)
+{
+ int ret, flags = DATTR_MASK;
+
+ if (!vxi) {
+ if (target)
+ *target = device;
+ goto out;
+ }
+ ret = __lookup_mapping(vxi, device, target, &flags, mode);
+ vxdprintk(VXD_CBIT(misc, 8), "vs_map_device: %08x target: %08x flags: %04x mode: %04x mapped=%d",
+ device, target ? *target : 0, flags, mode, ret);
+out:
+ return (flags & DATTR_MASK);
+}
+
+
+
+static int do_set_mapping(struct vx_info *vxi,
+ dev_t device, dev_t target, int flags, umode_t mode)
+{
+ if (device) {
+ struct vs_mapping *new;
+
+ new = kmem_cache_alloc(dmap_cachep, GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
+
+ INIT_HLIST_NODE(&new->dm_hlist);
+ new->device = device;
+ new->target.target = target;
+ new->target.flags = flags | mode;
+ new->xid = (vxi ? vxi->vx_id : 0);
+
+ vxdprintk(VXD_CBIT(misc, 8), "do_set_mapping: %08x target: %08x flags: %04x", device, target, flags);
+ __hash_mapping(vxi, new);
+ } else {
+ struct vx_dmap_target new = {
+ .target = target,
+ .flags = flags | mode,
+ };
+ __set_default(vxi, mode, &new);
+ }
+ return 0;
+}
+
+
+static int do_unset_mapping(struct vx_info *vxi,
+ dev_t device, dev_t target, int flags, umode_t mode)
+{
+ int ret = -EINVAL;
+
+ if (device) {
+ ret = __remove_mapping(vxi, device, mode);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = __remove_default(vxi, mode);
+ if (ret < 0)
+ goto out;
+ }
+
+out:
+ return ret;
+}
+
+
+static inline int __user_device(const char __user *name, dev_t *dev,
+ umode_t *mode)
+{
+ struct nameidata nd;
+ int ret;
+
+ if (!name) {
+ *dev = 0;
+ return 0;
+ }
+ ret = user_path_walk_link(name, &nd);
+ if (ret)
+ return ret;
+ if (nd.path.dentry->d_inode) {
+ *dev = nd.path.dentry->d_inode->i_rdev;
+ *mode = nd.path.dentry->d_inode->i_mode;
+ }
+ path_put(&nd.path);
+ return 0;
+}
+
+static inline int __mapping_mode(dev_t device, dev_t target,
+ umode_t device_mode, umode_t target_mode, umode_t *mode)
+{
+ if (device)
+ *mode = device_mode & S_IFMT;
+ else if (target)
+ *mode = target_mode & S_IFMT;
+ else
+ return -EINVAL;
+
+ /* if both given, device and target mode have to match */
+ if (device && target &&
+ ((device_mode ^ target_mode) & S_IFMT))
+ return -EINVAL;
+ return 0;
+}
+
+
+static inline int do_mapping(struct vx_info *vxi, const char __user *device_path,
+ const char __user *target_path, int flags, int set)
+{
+ dev_t device = ~0, target = ~0;
+ umode_t device_mode = 0, target_mode = 0, mode;
+ int ret;
+
+ ret = __user_device(device_path, &device, &device_mode);
+ if (ret)
+ return ret;
+ ret = __user_device(target_path, &target, &target_mode);
+ if (ret)
+ return ret;
+
+ ret = __mapping_mode(device, target,
+ device_mode, target_mode, &mode);
+ if (ret)
+ return ret;
+
+ if (set)
+ return do_set_mapping(vxi, device, target,
+ flags, mode);
+ else
+ return do_unset_mapping(vxi, device, target,
+ flags, mode);
+}
+
+
+int vc_set_mapping(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_set_mapping_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_mapping(vxi, vc_data.device, vc_data.target,
+ vc_data.flags, 1);
+}
+
+int vc_unset_mapping(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_set_mapping_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_mapping(vxi, vc_data.device, vc_data.target,
+ vc_data.flags, 0);
+}
+
+
+#ifdef CONFIG_COMPAT
+
+int vc_set_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_set_mapping_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+ compat_ptr(vc_data.target_ptr), vc_data.flags, 1);
+}
+
+int vc_unset_mapping_x32(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_set_mapping_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_mapping(vxi, compat_ptr(vc_data.device_ptr),
+ compat_ptr(vc_data.target_ptr), vc_data.flags, 0);
+}
+
+#endif /* CONFIG_COMPAT */
+
+
--- a/kernel/vserver/dlimit.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/dlimit.c 2008-04-21 12:36:09.000000000 -0400
@@ -0,0 +1,521 @@
+/*
+ * linux/kernel/vserver/dlimit.c
+ *
+ * Virtual Server: Context Disk Limits
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 initial version
+ * V0.02 compat32 splitup
+ *
+ */
+
+#include <linux/statfs.h>
+#include <linux/sched.h>
+#include <linux/vs_tag.h>
+#include <linux/vs_dlimit.h>
+#include <linux/vserver/dlimit_cmd.h>
+
+#include <asm/uaccess.h>
+
+/* __alloc_dl_info()
+
+ * allocate an initialized dl_info struct
+ * doesn't make it visible (hash) */
+
+static struct dl_info *__alloc_dl_info(struct super_block *sb, tag_t tag)
+{
+ struct dl_info *new = NULL;
+
+ vxdprintk(VXD_CBIT(dlim, 5),
+ "alloc_dl_info(%p,%d)*", sb, tag);
+
+ /* would this benefit from a slab cache? */
+ new = kmalloc(sizeof(struct dl_info), GFP_KERNEL);
+ if (!new)
+ return 0;
+
+ memset(new, 0, sizeof(struct dl_info));
+ new->dl_tag = tag;
+ new->dl_sb = sb;
+ INIT_RCU_HEAD(&new->dl_rcu);
+ INIT_HLIST_NODE(&new->dl_hlist);
+ spin_lock_init(&new->dl_lock);
+ atomic_set(&new->dl_refcnt, 0);
+ atomic_set(&new->dl_usecnt, 0);
+
+ /* rest of init goes here */
+
+ vxdprintk(VXD_CBIT(dlim, 4),
+ "alloc_dl_info(%p,%d) = %p", sb, tag, new);
+ return new;
+}
+
+/* __dealloc_dl_info()
+
+ * final disposal of dl_info */
+
+static void __dealloc_dl_info(struct dl_info *dli)
+{
+ vxdprintk(VXD_CBIT(dlim, 4),
+ "dealloc_dl_info(%p)", dli);
+
+ dli->dl_hlist.next = LIST_POISON1;
+ dli->dl_tag = -1;
+ dli->dl_sb = 0;
+
+ BUG_ON(atomic_read(&dli->dl_usecnt));
+ BUG_ON(atomic_read(&dli->dl_refcnt));
+
+ kfree(dli);
+}
+
+
+/* hash table for dl_info hash */
+
+#define DL_HASH_SIZE 13
+
+struct hlist_head dl_info_hash[DL_HASH_SIZE];
+
+static spinlock_t dl_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(struct super_block *sb, tag_t tag)
+{
+ return ((tag ^ (unsigned long)sb) % DL_HASH_SIZE);
+}
+
+
+
+/* __hash_dl_info()
+
+ * add the dli to the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __hash_dl_info(struct dl_info *dli)
+{
+ struct hlist_head *head;
+
+ vxdprintk(VXD_CBIT(dlim, 6),
+ "__hash_dl_info: %p[#%d]", dli, dli->dl_tag);
+ get_dl_info(dli);
+ head = &dl_info_hash[__hashval(dli->dl_sb, dli->dl_tag)];
+ hlist_add_head_rcu(&dli->dl_hlist, head);
+}
+
+/* __unhash_dl_info()
+
+ * remove the dli from the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __unhash_dl_info(struct dl_info *dli)
+{
+ vxdprintk(VXD_CBIT(dlim, 6),
+ "__unhash_dl_info: %p[#%d]", dli, dli->dl_tag);
+ hlist_del_rcu(&dli->dl_hlist);
+ put_dl_info(dli);
+}
+
+
+/* __lookup_dl_info()
+
+ * requires the rcu_read_lock()
+ * doesn't increment the dl_refcnt */
+
+static inline struct dl_info *__lookup_dl_info(struct super_block *sb, tag_t tag)
+{
+ struct hlist_head *head = &dl_info_hash[__hashval(sb, tag)];
+ struct hlist_node *pos;
+ struct dl_info *dli;
+
+ hlist_for_each_entry_rcu(dli, pos, head, dl_hlist) {
+
+ if (dli->dl_tag == tag && dli->dl_sb == sb) {
+ return dli;
+ }
+ }
+ return NULL;
+}
+
+
+struct dl_info *locate_dl_info(struct super_block *sb, tag_t tag)
+{
+ struct dl_info *dli;
+
+ rcu_read_lock();
+ dli = get_dl_info(__lookup_dl_info(sb, tag));
+ vxdprintk(VXD_CBIT(dlim, 7),
+ "locate_dl_info(%p,#%d) = %p", sb, tag, dli);
+ rcu_read_unlock();
+ return dli;
+}
+
+void rcu_free_dl_info(struct rcu_head *head)
+{
+ struct dl_info *dli = container_of(head, struct dl_info, dl_rcu);
+ int usecnt, refcnt;
+
+ BUG_ON(!dli || !head);
+
+ usecnt = atomic_read(&dli->dl_usecnt);
+ BUG_ON(usecnt < 0);
+
+ refcnt = atomic_read(&dli->dl_refcnt);
+ BUG_ON(refcnt < 0);
+
+ vxdprintk(VXD_CBIT(dlim, 3),
+ "rcu_free_dl_info(%p)", dli);
+ if (!usecnt)
+ __dealloc_dl_info(dli);
+ else
+ printk("!!! rcu didn't free\n");
+}
+
+
+
+
+static int do_addrem_dlimit(uint32_t id, const char __user *name,
+ uint32_t flags, int add)
+{
+ struct nameidata nd;
+ int ret;
+
+ ret = user_path_walk_link(name, &nd);
+ if (!ret) {
+ struct super_block *sb;
+ struct dl_info *dli;
+
+ ret = -EINVAL;
+ if (!nd.path.dentry->d_inode)
+ goto out_release;
+ if (!(sb = nd.path.dentry->d_inode->i_sb))
+ goto out_release;
+
+ if (add) {
+ dli = __alloc_dl_info(sb, id);
+ spin_lock(&dl_info_hash_lock);
+
+ ret = -EEXIST;
+ if (__lookup_dl_info(sb, id))
+ goto out_unlock;
+ __hash_dl_info(dli);
+ dli = NULL;
+ } else {
+ spin_lock(&dl_info_hash_lock);
+ dli = __lookup_dl_info(sb, id);
+
+ ret = -ESRCH;
+ if (!dli)
+ goto out_unlock;
+ __unhash_dl_info(dli);
+ }
+ ret = 0;
+ out_unlock:
+ spin_unlock(&dl_info_hash_lock);
+ if (add && dli)
+ __dealloc_dl_info(dli);
+ out_release:
+ path_put(&nd.path);
+ }
+ return ret;
+}
+
+int vc_add_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 1);
+}
+
+int vc_rem_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_base_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_addrem_dlimit(id, vc_data.name, vc_data.flags, 0);
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_add_dlimit_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_addrem_dlimit(id,
+ compat_ptr(vc_data.name_ptr), vc_data.flags, 1);
+}
+
+int vc_rem_dlimit_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_base_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_addrem_dlimit(id,
+ compat_ptr(vc_data.name_ptr), vc_data.flags, 0);
+}
+
+#endif /* CONFIG_COMPAT */
+
+
+static inline
+int do_set_dlimit(uint32_t id, const char __user *name,
+ uint32_t space_used, uint32_t space_total,
+ uint32_t inodes_used, uint32_t inodes_total,
+ uint32_t reserved, uint32_t flags)
+{
+ struct nameidata nd;
+ int ret;
+
+ ret = user_path_walk_link(name, &nd);
+ if (!ret) {
+ struct super_block *sb;
+ struct dl_info *dli;
+
+ ret = -EINVAL;
+ if (!nd.path.dentry->d_inode)
+ goto out_release;
+ if (!(sb = nd.path.dentry->d_inode->i_sb))
+ goto out_release;
+ if ((reserved != CDLIM_KEEP &&
+ reserved > 100) ||
+ (inodes_used != CDLIM_KEEP &&
+ inodes_used > inodes_total) ||
+ (space_used != CDLIM_KEEP &&
+ space_used > space_total))
+ goto out_release;
+
+ ret = -ESRCH;
+ dli = locate_dl_info(sb, id);
+ if (!dli)
+ goto out_release;
+
+ spin_lock(&dli->dl_lock);
+
+ if (inodes_used != CDLIM_KEEP)
+ dli->dl_inodes_used = inodes_used;
+ if (inodes_total != CDLIM_KEEP)
+ dli->dl_inodes_total = inodes_total;
+ if (space_used != CDLIM_KEEP) {
+ dli->dl_space_used = space_used;
+ dli->dl_space_used <<= 10;
+ }
+ if (space_total == CDLIM_INFINITY)
+ dli->dl_space_total = DLIM_INFINITY;
+ else if (space_total != CDLIM_KEEP) {
+ dli->dl_space_total = space_total;
+ dli->dl_space_total <<= 10;
+ }
+ if (reserved != CDLIM_KEEP)
+ dli->dl_nrlmult = (1 << 10) * (100 - reserved) / 100;
+
+ spin_unlock(&dli->dl_lock);
+
+ put_dl_info(dli);
+ ret = 0;
+
+ out_release:
+ path_put(&nd.path);
+ }
+ return ret;
+}
+
+int vc_set_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_dlimit(id, vc_data.name,
+ vc_data.space_used, vc_data.space_total,
+ vc_data.inodes_used, vc_data.inodes_total,
+ vc_data.reserved, vc_data.flags);
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_set_dlimit_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_dlimit(id, compat_ptr(vc_data.name_ptr),
+ vc_data.space_used, vc_data.space_total,
+ vc_data.inodes_used, vc_data.inodes_total,
+ vc_data.reserved, vc_data.flags);
+}
+
+#endif /* CONFIG_COMPAT */
+
+
+static inline
+int do_get_dlimit(uint32_t id, const char __user *name,
+ uint32_t *space_used, uint32_t *space_total,
+ uint32_t *inodes_used, uint32_t *inodes_total,
+ uint32_t *reserved, uint32_t *flags)
+{
+ struct nameidata nd;
+ int ret;
+
+ ret = user_path_walk_link(name, &nd);
+ if (!ret) {
+ struct super_block *sb;
+ struct dl_info *dli;
+
+ ret = -EINVAL;
+ if (!nd.path.dentry->d_inode)
+ goto out_release;
+ if (!(sb = nd.path.dentry->d_inode->i_sb))
+ goto out_release;
+
+ ret = -ESRCH;
+ dli = locate_dl_info(sb, id);
+ if (!dli)
+ goto out_release;
+
+ spin_lock(&dli->dl_lock);
+ *inodes_used = dli->dl_inodes_used;
+ *inodes_total = dli->dl_inodes_total;
+ *space_used = dli->dl_space_used >> 10;
+ if (dli->dl_space_total == DLIM_INFINITY)
+ *space_total = CDLIM_INFINITY;
+ else
+ *space_total = dli->dl_space_total >> 10;
+
+ *reserved = 100 - ((dli->dl_nrlmult * 100 + 512) >> 10);
+ spin_unlock(&dli->dl_lock);
+
+ put_dl_info(dli);
+ ret = -EFAULT;
+
+ ret = 0;
+ out_release:
+ path_put(&nd.path);
+ }
+ return ret;
+}
+
+
+int vc_get_dlimit(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_v0 vc_data;
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_get_dlimit(id, vc_data.name,
+ &vc_data.space_used, &vc_data.space_total,
+ &vc_data.inodes_used, &vc_data.inodes_total,
+ &vc_data.reserved, &vc_data.flags);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_get_dlimit_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_ctx_dlimit_v0_x32 vc_data;
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_get_dlimit(id, compat_ptr(vc_data.name_ptr),
+ &vc_data.space_used, &vc_data.space_total,
+ &vc_data.inodes_used, &vc_data.inodes_total,
+ &vc_data.reserved, &vc_data.flags);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+#endif /* CONFIG_COMPAT */
+
+
+void vx_vsi_statfs(struct super_block *sb, struct kstatfs *buf)
+{
+ struct dl_info *dli;
+ __u64 blimit, bfree, bavail;
+ __u32 ifree;
+
+ dli = locate_dl_info(sb, dx_current_tag());
+ if (!dli)
+ return;
+
+ spin_lock(&dli->dl_lock);
+ if (dli->dl_inodes_total == (unsigned long)DLIM_INFINITY)
+ goto no_ilim;
+
+ /* reduce max inodes available to limit */
+ if (buf->f_files > dli->dl_inodes_total)
+ buf->f_files = dli->dl_inodes_total;
+
+ ifree = dli->dl_inodes_total - dli->dl_inodes_used;
+ /* reduce free inodes to min */
+ if (ifree < buf->f_ffree)
+ buf->f_ffree = ifree;
+
+no_ilim:
+ if (dli->dl_space_total == DLIM_INFINITY)
+ goto no_blim;
+
+ blimit = dli->dl_space_total >> sb->s_blocksize_bits;
+
+ if (dli->dl_space_total < dli->dl_space_used)
+ bfree = 0;
+ else
+ bfree = (dli->dl_space_total - dli->dl_space_used)
+ >> sb->s_blocksize_bits;
+
+ bavail = ((dli->dl_space_total >> 10) * dli->dl_nrlmult);
+ if (bavail < dli->dl_space_used)
+ bavail = 0;
+ else
+ bavail = (bavail - dli->dl_space_used)
+ >> sb->s_blocksize_bits;
+
+ /* reduce max space available to limit */
+ if (buf->f_blocks > blimit)
+ buf->f_blocks = blimit;
+
+ /* reduce free space to min */
+ if (bfree < buf->f_bfree)
+ buf->f_bfree = bfree;
+
+ /* reduce avail space to min */
+ if (bavail < buf->f_bavail)
+ buf->f_bavail = bavail;
+
+no_blim:
+ spin_unlock(&dli->dl_lock);
+ put_dl_info(dli);
+
+ return;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(locate_dl_info);
+EXPORT_SYMBOL_GPL(rcu_free_dl_info);
+
--- a/kernel/vserver/helper.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/helper.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,199 @@
+/*
+ * linux/kernel/vserver/helper.c
+ *
+ * Virtual Context Support
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 basic helper
+ *
+ */
+
+#include <linux/kmod.h>
+#include <linux/reboot.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/signal.h>
+
+
+char vshelper_path[255] = "/sbin/vshelper";
+
+
+static int do_vshelper(char *name, char *argv[], char *envp[], int sync)
+{
+ int ret;
+
+ if ((ret = call_usermodehelper(name, argv, envp, sync))) {
+ printk( KERN_WARNING
+ "%s: (%s %s) returned %s with %d\n",
+ name, argv[1], argv[2],
+ sync ? "sync" : "async", ret);
+ }
+ vxdprintk(VXD_CBIT(switch, 4),
+ "%s: (%s %s) returned %s with %d",
+ name, argv[1], argv[2], sync ? "sync" : "async", ret);
+ return ret;
+}
+
+/*
+ * vshelper path is set via /proc/sys
+ * invoked by vserver sys_reboot(), with
+ * the following arguments
+ *
+ * argv [0] = vshelper_path;
+ * argv [1] = action: "restart", "halt", "poweroff", ...
+ * argv [2] = context identifier
+ *
+ * envp [*] = type-specific parameters
+ */
+
+long vs_reboot_helper(struct vx_info *vxi, int cmd, void __user *arg)
+{
+ char id_buf[8], cmd_buf[16];
+ char uid_buf[16], pid_buf[16];
+ int ret;
+
+ char *argv[] = {vshelper_path, NULL, id_buf, 0};
+ char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
+ uid_buf, pid_buf, cmd_buf, 0};
+
+ if (vx_info_state(vxi, VXS_HELPER))
+ return -EAGAIN;
+ vxi->vx_state |= VXS_HELPER;
+
+ snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
+
+ snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+ snprintf(uid_buf, sizeof(uid_buf)-1, "VS_UID=%d", current->uid);
+ snprintf(pid_buf, sizeof(pid_buf)-1, "VS_PID=%d", current->pid);
+
+ switch (cmd) {
+ case LINUX_REBOOT_CMD_RESTART:
+ argv[1] = "restart";
+ break;
+
+ case LINUX_REBOOT_CMD_HALT:
+ argv[1] = "halt";
+ break;
+
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ argv[1] = "poweroff";
+ break;
+
+ case LINUX_REBOOT_CMD_SW_SUSPEND:
+ argv[1] = "swsusp";
+ break;
+
+ default:
+ vxi->vx_state &= ~VXS_HELPER;
+ return 0;
+ }
+
+ ret = do_vshelper(vshelper_path, argv, envp, 0);
+ vxi->vx_state &= ~VXS_HELPER;
+ __wakeup_vx_info(vxi);
+ return (ret) ? -EPERM : 0;
+}
+
+
+long vs_reboot(unsigned int cmd, void __user *arg)
+{
+ struct vx_info *vxi = current->vx_info;
+ long ret = 0;
+
+ vxdprintk(VXD_CBIT(misc, 5),
+ "vs_reboot(%p[#%d],%d)",
+ vxi, vxi ? vxi->vx_id : 0, cmd);
+
+ ret = vs_reboot_helper(vxi, cmd, arg);
+ if (ret)
+ return ret;
+
+ vxi->reboot_cmd = cmd;
+ if (vx_info_flags(vxi, VXF_REBOOT_KILL, 0)) {
+ switch (cmd) {
+ case LINUX_REBOOT_CMD_RESTART:
+ case LINUX_REBOOT_CMD_HALT:
+ case LINUX_REBOOT_CMD_POWER_OFF:
+ vx_info_kill(vxi, 0, SIGKILL);
+ vx_info_kill(vxi, 1, SIGKILL);
+ default:
+ break;
+ }
+ }
+ return 0;
+}
+
+
+/*
+ * argv [0] = vshelper_path;
+ * argv [1] = action: "startup", "shutdown"
+ * argv [2] = context identifier
+ *
+ * envp [*] = type-specific parameters
+ */
+
+long vs_state_change(struct vx_info *vxi, unsigned int cmd)
+{
+ char id_buf[8], cmd_buf[16];
+ char *argv[] = {vshelper_path, NULL, id_buf, 0};
+ char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+ if (!vx_info_flags(vxi, VXF_SC_HELPER, 0))
+ return 0;
+
+ snprintf(id_buf, sizeof(id_buf)-1, "%d", vxi->vx_id);
+ snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+
+ switch (cmd) {
+ case VSC_STARTUP:
+ argv[1] = "startup";
+ break;
+ case VSC_SHUTDOWN:
+ argv[1] = "shutdown";
+ break;
+ default:
+ return 0;
+ }
+
+ return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
+
+/*
+ * argv [0] = vshelper_path;
+ * argv [1] = action: "netup", "netdown"
+ * argv [2] = context identifier
+ *
+ * envp [*] = type-specific parameters
+ */
+
+long vs_net_change(struct nx_info *nxi, unsigned int cmd)
+{
+ char id_buf[8], cmd_buf[16];
+ char *argv[] = {vshelper_path, NULL, id_buf, 0};
+ char *envp[] = {"HOME=/", "TERM=linux",
+ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", cmd_buf, 0};
+
+ if (!nx_info_flags(nxi, NXF_SC_HELPER, 0))
+ return 0;
+
+ snprintf(id_buf, sizeof(id_buf)-1, "%d", nxi->nx_id);
+ snprintf(cmd_buf, sizeof(cmd_buf)-1, "VS_CMD=%08x", cmd);
+
+ switch (cmd) {
+ case VSC_NETUP:
+ argv[1] = "netup";
+ break;
+ case VSC_NETDOWN:
+ argv[1] = "netdown";
+ break;
+ default:
+ return 0;
+ }
+
+ return do_vshelper(vshelper_path, argv, envp, 1);
+}
+
--- a/kernel/vserver/history.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/history.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,258 @@
+/*
+ * kernel/vserver/history.c
+ *
+ * Virtual Context History Backtrace
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 basic structure
+ * V0.02 hash/unhash and trace
+ * V0.03 preemption fixes
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+
+#include <linux/vserver/context.h>
+#include <linux/vserver/debug.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/history.h>
+
+
+#ifdef CONFIG_VSERVER_HISTORY
+#define VXH_SIZE CONFIG_VSERVER_HISTORY_SIZE
+#else
+#define VXH_SIZE 64
+#endif
+
+struct _vx_history {
+ unsigned int counter;
+
+ struct _vx_hist_entry entry[VXH_SIZE + 1];
+};
+
+
+DEFINE_PER_CPU(struct _vx_history, vx_history_buffer);
+
+unsigned volatile int vxh_active = 1;
+
+static atomic_t sequence = ATOMIC_INIT(0);
+
+
+/* vxh_advance()
+
+ * requires disabled preemption */
+
+struct _vx_hist_entry *vxh_advance(void *loc)
+{
+ unsigned int cpu = smp_processor_id();
+ struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+ struct _vx_hist_entry *entry;
+ unsigned int index;
+
+ index = vxh_active ? (hist->counter++ % VXH_SIZE) : VXH_SIZE;
+ entry = &hist->entry[index];
+
+ entry->seq = atomic_inc_return(&sequence);
+ entry->loc = loc;
+ return entry;
+}
+
+EXPORT_SYMBOL_GPL(vxh_advance);
+
+
+#define VXH_LOC_FMTS "(#%04x,*%d):%p"
+
+#define VXH_LOC_ARGS(e) (e)->seq, cpu, (e)->loc
+
+
+#define VXH_VXI_FMTS "%p[#%d,%d.%d]"
+
+#define VXH_VXI_ARGS(e) (e)->vxi.ptr, \
+ (e)->vxi.ptr ? (e)->vxi.xid : 0, \
+ (e)->vxi.ptr ? (e)->vxi.usecnt : 0, \
+ (e)->vxi.ptr ? (e)->vxi.tasks : 0
+
+void vxh_dump_entry(struct _vx_hist_entry *e, unsigned cpu)
+{
+ switch (e->type) {
+ case VXH_THROW_OOPS:
+ printk( VXH_LOC_FMTS " oops \n", VXH_LOC_ARGS(e));
+ break;
+
+ case VXH_GET_VX_INFO:
+ case VXH_PUT_VX_INFO:
+ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_GET_VX_INFO) ? "get" : "put",
+ VXH_VXI_ARGS(e));
+ break;
+
+ case VXH_INIT_VX_INFO:
+ case VXH_SET_VX_INFO:
+ case VXH_CLR_VX_INFO:
+ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_INIT_VX_INFO) ? "init" :
+ ((e->type == VXH_SET_VX_INFO) ? "set" : "clr"),
+ VXH_VXI_ARGS(e), e->sc.data);
+ break;
+
+ case VXH_CLAIM_VX_INFO:
+ case VXH_RELEASE_VX_INFO:
+ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS " @%p\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_CLAIM_VX_INFO) ? "claim" : "release",
+ VXH_VXI_ARGS(e), e->sc.data);
+ break;
+
+ case VXH_ALLOC_VX_INFO:
+ case VXH_DEALLOC_VX_INFO:
+ printk( VXH_LOC_FMTS " %s_vx_info " VXH_VXI_FMTS "\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_ALLOC_VX_INFO) ? "alloc" : "dealloc",
+ VXH_VXI_ARGS(e));
+ break;
+
+ case VXH_HASH_VX_INFO:
+ case VXH_UNHASH_VX_INFO:
+ printk( VXH_LOC_FMTS " __%s_vx_info " VXH_VXI_FMTS "\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_HASH_VX_INFO) ? "hash" : "unhash",
+ VXH_VXI_ARGS(e));
+ break;
+
+ case VXH_LOC_VX_INFO:
+ case VXH_LOOKUP_VX_INFO:
+ case VXH_CREATE_VX_INFO:
+ printk( VXH_LOC_FMTS " __%s_vx_info [#%d] -> " VXH_VXI_FMTS "\n",
+ VXH_LOC_ARGS(e),
+ (e->type == VXH_CREATE_VX_INFO) ? "create" :
+ ((e->type == VXH_LOC_VX_INFO) ? "loc" : "lookup"),
+ e->ll.arg, VXH_VXI_ARGS(e));
+ break;
+ }
+}
+
+static void __vxh_dump_history(void)
+{
+ unsigned int i, cpu;
+
+ printk("History:\tSEQ: %8x\tNR_CPUS: %d\n",
+ atomic_read(&sequence), NR_CPUS);
+
+ for (i = 0; i < VXH_SIZE; i++) {
+ for_each_online_cpu(cpu) {
+ struct _vx_history *hist =
+ &per_cpu(vx_history_buffer, cpu);
+ unsigned int index = (hist->counter - i) % VXH_SIZE;
+ struct _vx_hist_entry *entry = &hist->entry[index];
+
+ vxh_dump_entry(entry, cpu);
+ }
+ }
+}
+
+void vxh_dump_history(void)
+{
+ vxh_active = 0;
+#ifdef CONFIG_SMP
+ local_irq_enable();
+ smp_send_stop();
+ local_irq_disable();
+#endif
+ __vxh_dump_history();
+}
+
+
+/* vserver syscall commands below here */
+
+
+int vc_dump_history(uint32_t id)
+{
+ vxh_active = 0;
+ __vxh_dump_history();
+ vxh_active = 1;
+
+ return 0;
+}
+
+
+int do_read_history(struct __user _vx_hist_entry *data,
+ int cpu, uint32_t *index, uint32_t *count)
+{
+ int pos, ret = 0;
+ struct _vx_history *hist = &per_cpu(vx_history_buffer, cpu);
+ int end = hist->counter;
+ int start = end - VXH_SIZE + 2;
+ int idx = *index;
+
+ /* special case: get current pos */
+ if (!*count) {
+ *index = end;
+ return 0;
+ }
+
+ /* have we lost some data? */
+ if (idx < start)
+ idx = start;
+
+ for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
+ struct _vx_hist_entry *entry =
+ &hist->entry[idx % VXH_SIZE];
+
+ /* send entry to userspace */
+ ret = copy_to_user(&data[pos], entry, sizeof(*entry));
+ if (ret)
+ break;
+ }
+ /* save new index and count */
+ *index = idx;
+ *count = pos;
+ return ret ? ret : (*index < end);
+}
+
+int vc_read_history(uint32_t id, void __user *data)
+{
+ struct vcmd_read_history_v0 vc_data;
+ int ret;
+
+ if (id >= NR_CPUS)
+ return -EINVAL;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_read_history((struct __user _vx_hist_entry *)vc_data.data,
+ id, &vc_data.index, &vc_data.count);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_read_history_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_read_history_v0_x32 vc_data;
+ int ret;
+
+ if (id >= NR_CPUS)
+ return -EINVAL;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_read_history((struct __user _vx_hist_entry *)
+ compat_ptr(vc_data.data_ptr),
+ id, &vc_data.index, &vc_data.count);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
--- a/kernel/vserver/inet.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/inet.c 2008-05-29 18:56:59.000000000 -0400
@@ -0,0 +1,225 @@
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+#include <linux/vserver/debug.h>
+#include <net/route.h>
+#include <net/addrconf.h>
+
+
+int nx_v4_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+ int ret = 0;
+
+ if (!nxi1 || !nxi2 || nxi1 == nxi2)
+ ret = 1;
+ else {
+ struct nx_addr_v4 *ptr;
+
+ for (ptr = &nxi1->v4; ptr; ptr = ptr->next) {
+ if (v4_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+ ret = 1;
+ break;
+ }
+ }
+ }
+
+ vxdprintk(VXD_CBIT(net, 2),
+ "nx_v4_addr_conflict(%p,%p): %d",
+ nxi1, nxi2, ret);
+
+ return ret;
+}
+
+
+#ifdef CONFIG_IPV6
+
+int nx_v6_addr_conflict(struct nx_info *nxi1, struct nx_info *nxi2)
+{
+ int ret = 0;
+
+ if (!nxi1 || !nxi2 || nxi1 == nxi2)
+ ret = 1;
+ else {
+ struct nx_addr_v6 *ptr;
+
+ for (ptr = &nxi1->v6; ptr; ptr = ptr->next) {
+ if (v6_nx_addr_in_nx_info(nxi2, ptr, -1)) {
+ ret = 1;
+ break;
+ }
+ }
+ }
+
+ vxdprintk(VXD_CBIT(net, 2),
+ "nx_v6_addr_conflict(%p,%p): %d",
+ nxi1, nxi2, ret);
+
+ return ret;
+}
+
+#endif
+
+int v4_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+ struct in_device *in_dev;
+ struct in_ifaddr **ifap;
+ struct in_ifaddr *ifa;
+ int ret = 0;
+
+ if (!dev)
+ goto out;
+ in_dev = in_dev_get(dev);
+ if (!in_dev)
+ goto out;
+
+ for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
+ ifap = &ifa->ifa_next) {
+ if (v4_addr_in_nx_info(nxi, ifa->ifa_local, NXA_MASK_SHOW)) {
+ ret = 1;
+ break;
+ }
+ }
+ in_dev_put(in_dev);
+out:
+ return ret;
+}
+
+
+#ifdef CONFIG_IPV6
+
+int v6_dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+ struct inet6_dev *in_dev;
+ struct inet6_ifaddr **ifap;
+ struct inet6_ifaddr *ifa;
+ int ret = 0;
+
+ if (!dev)
+ goto out;
+ in_dev = in6_dev_get(dev);
+ if (!in_dev)
+ goto out;
+
+ for (ifap = &in_dev->addr_list; (ifa = *ifap) != NULL;
+ ifap = &ifa->if_next) {
+ if (v6_addr_in_nx_info(nxi, &ifa->addr, -1)) {
+ ret = 1;
+ break;
+ }
+ }
+ in6_dev_put(in_dev);
+out:
+ return ret;
+}
+
+#endif
+
+int dev_in_nx_info(struct net_device *dev, struct nx_info *nxi)
+{
+ int ret = 1;
+
+ if (!nxi)
+ goto out;
+ if (nxi->v4.type && v4_dev_in_nx_info(dev, nxi))
+ goto out;
+#ifdef CONFIG_IPV6
+ ret = 2;
+ if (nxi->v6.type && v6_dev_in_nx_info(dev, nxi))
+ goto out;
+#endif
+ ret = 0;
+out:
+ vxdprintk(VXD_CBIT(net, 3),
+ "dev_in_nx_info(%p,%p[#%d]) = %d",
+ dev, nxi, nxi ? nxi->nx_id : 0, ret);
+ return ret;
+}
+
+int ip_v4_find_src(struct net *net, struct nx_info *nxi,
+ struct rtable **rp, struct flowi *fl)
+{
+ if (!nxi)
+ return 0;
+
+ /* FIXME: handle lback only case */
+ if (!NX_IPV4(nxi))
+ return -EPERM;
+
+ vxdprintk(VXD_CBIT(net, 4),
+ "ip_v4_find_src(%p[#%u]) " NIPQUAD_FMT " -> " NIPQUAD_FMT,
+ nxi, nxi ? nxi->nx_id : 0,
+ NIPQUAD(fl->fl4_src), NIPQUAD(fl->fl4_dst));
+
+ /* single IP is unconditional */
+ if (nx_info_flags(nxi, NXF_SINGLE_IP, 0) &&
+ (fl->fl4_src == INADDR_ANY))
+ fl->fl4_src = nxi->v4.ip[0].s_addr;
+
+ if (fl->fl4_src == INADDR_ANY) {
+ struct nx_addr_v4 *ptr;
+ __be32 found = 0;
+ int err;
+
+ err = __ip_route_output_key(net, rp, fl);
+ if (!err) {
+ found = (*rp)->rt_src;
+ ip_rt_put(*rp);
+ vxdprintk(VXD_CBIT(net, 4),
+ "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+ nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(found));
+ if (v4_addr_in_nx_info(nxi, found, NXA_MASK_BIND))
+ goto found;
+ }
+
+ for (ptr = &nxi->v4; ptr; ptr = ptr->next) {
+ __be32 primary = ptr->ip[0].s_addr;
+ __be32 mask = ptr->mask.s_addr;
+ __be32 neta = primary & mask;
+
+ vxdprintk(VXD_CBIT(net, 4), "ip_v4_find_src(%p[#%u]) chk: "
+ NIPQUAD_FMT "/" NIPQUAD_FMT "/" NIPQUAD_FMT,
+ nxi, nxi ? nxi->nx_id : 0, NIPQUAD(primary),
+ NIPQUAD(mask), NIPQUAD(neta));
+ if ((found & mask) != neta)
+ continue;
+
+ fl->fl4_src = primary;
+ err = __ip_route_output_key(net, rp, fl);
+ vxdprintk(VXD_CBIT(net, 4),
+ "ip_v4_find_src(%p[#%u]) rok[%u]: " NIPQUAD_FMT,
+ nxi, nxi ? nxi->nx_id : 0, fl->oif, NIPQUAD(primary));
+ if (!err) {
+ found = (*rp)->rt_src;
+ ip_rt_put(*rp);
+ if (found == primary)
+ goto found;
+ }
+ }
+ /* still no source ip? */
+ found = ipv4_is_loopback(fl->fl4_dst)
+ ? IPI_LOOPBACK : nxi->v4.ip[0].s_addr;
+ found:
+ /* assign src ip to flow */
+ fl->fl4_src = found;
+
+ } else {
+ if (!v4_addr_in_nx_info(nxi, fl->fl4_src, NXA_MASK_BIND))
+ return -EPERM;
+ }
+
+ if (nx_info_flags(nxi, NXF_LBACK_REMAP, 0)) {
+ if (ipv4_is_loopback(fl->fl4_dst))
+ fl->fl4_dst = nxi->v4_lback.s_addr;
+ if (ipv4_is_loopback(fl->fl4_src))
+ fl->fl4_src = nxi->v4_lback.s_addr;
+ } else if (ipv4_is_loopback(fl->fl4_dst) &&
+ !nx_info_flags(nxi, NXF_LBACK_ALLOW, 0))
+ return -EPERM;
+
+ return 0;
+}
+
+EXPORT_SYMBOL_GPL(ip_v4_find_src);
+
--- a/kernel/vserver/init.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/init.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,45 @@
+/*
+ * linux/kernel/vserver/init.c
+ *
+ * Virtual Server Init
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 basic structure
+ *
+ */
+
+#include <linux/init.h>
+
+int vserver_register_sysctl(void);
+void vserver_unregister_sysctl(void);
+
+
+static int __init init_vserver(void)
+{
+ int ret = 0;
+
+#ifdef CONFIG_VSERVER_DEBUG
+ vserver_register_sysctl();
+#endif
+ return ret;
+}
+
+
+static void __exit exit_vserver(void)
+{
+
+#ifdef CONFIG_VSERVER_DEBUG
+ vserver_unregister_sysctl();
+#endif
+ return;
+}
+
+/* FIXME: GFP_ZONETYPES gone
+long vx_slab[GFP_ZONETYPES]; */
+long vx_area;
+
+
+module_init(init_vserver);
+module_exit(exit_vserver);
+
--- a/kernel/vserver/inode.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/inode.c 2008-04-21 16:52:16.000000000 -0400
@@ -0,0 +1,409 @@
+/*
+ * linux/kernel/vserver/inode.c
+ *
+ * Virtual Server: File System Support
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 separated from vcontext V0.05
+ * V0.02 moved to tag (instead of xid)
+ *
+ */
+
+#include <linux/tty.h>
+#include <linux/proc_fs.h>
+#include <linux/devpts_fs.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/parser.h>
+#include <linux/vserver/inode.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vs_base.h>
+#include <linux/vs_tag.h>
+
+#include <asm/uaccess.h>
+
+
+static int __vc_get_iattr(struct inode *in, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+ struct proc_dir_entry *entry;
+
+ if (!in || !in->i_sb)
+ return -ESRCH;
+
+ *flags = IATTR_TAG
+ | (IS_BARRIER(in) ? IATTR_BARRIER : 0)
+ | (IS_IUNLINK(in) ? IATTR_IUNLINK : 0)
+ | (IS_IMMUTABLE(in) ? IATTR_IMMUTABLE : 0);
+ *mask = IATTR_IUNLINK | IATTR_IMMUTABLE;
+
+ if (S_ISDIR(in->i_mode))
+ *mask |= IATTR_BARRIER;
+
+ if (IS_TAGGED(in)) {
+ *tag = in->i_tag;
+ *mask |= IATTR_TAG;
+ }
+
+ switch (in->i_sb->s_magic) {
+ case PROC_SUPER_MAGIC:
+ entry = PROC_I(in)->pde;
+
+ /* check for specific inodes? */
+ if (entry)
+ *mask |= IATTR_FLAGS;
+ if (entry)
+ *flags |= (entry->vx_flags & IATTR_FLAGS);
+ else
+ *flags |= (PROC_I(in)->vx_flags & IATTR_FLAGS);
+ break;
+
+ case DEVPTS_SUPER_MAGIC:
+ *tag = in->i_tag;
+ *mask |= IATTR_TAG;
+ break;
+
+ default:
+ break;
+ }
+ return 0;
+}
+
+int vc_get_iattr(void __user *data)
+{
+ struct nameidata nd;
+ struct vcmd_ctx_iattr_v1 vc_data = { .tag = -1 };
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = user_path_walk_link(vc_data.name, &nd);
+ if (!ret) {
+ ret = __vc_get_iattr(nd.path.dentry->d_inode,
+ &vc_data.tag, &vc_data.flags, &vc_data.mask);
+ path_put(&nd.path);
+ }
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_get_iattr_x32(void __user *data)
+{
+ struct nameidata nd;
+ struct vcmd_ctx_iattr_v1_x32 vc_data = { .tag = -1 };
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = user_path_walk_link(compat_ptr(vc_data.name_ptr), &nd);
+ if (!ret) {
+ ret = __vc_get_iattr(nd.path.dentry->d_inode,
+ &vc_data.tag, &vc_data.flags, &vc_data.mask);
+ path_put(&nd.path);
+ }
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+
+int vc_fget_iattr(uint32_t fd, void __user *data)
+{
+ struct file *filp;
+ struct vcmd_ctx_fiattr_v0 vc_data = { .tag = -1 };
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ filp = fget(fd);
+ if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+ return -EBADF;
+
+ ret = __vc_get_iattr(filp->f_dentry->d_inode,
+ &vc_data.tag, &vc_data.flags, &vc_data.mask);
+
+ fput(filp);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+
+static int __vc_set_iattr(struct dentry *de, uint32_t *tag, uint32_t *flags, uint32_t *mask)
+{
+ struct inode *in = de->d_inode;
+ int error = 0, is_proc = 0, has_tag = 0;
+ struct iattr attr = { 0 };
+
+ if (!in || !in->i_sb)
+ return -ESRCH;
+
+ is_proc = (in->i_sb->s_magic == PROC_SUPER_MAGIC);
+ if ((*mask & IATTR_FLAGS) && !is_proc)
+ return -EINVAL;
+
+ has_tag = IS_TAGGED(in) ||
+ (in->i_sb->s_magic == DEVPTS_SUPER_MAGIC);
+ if ((*mask & IATTR_TAG) && !has_tag)
+ return -EINVAL;
+
+ mutex_lock(&in->i_mutex);
+ if (*mask & IATTR_TAG) {
+ attr.ia_tag = *tag;
+ attr.ia_valid |= ATTR_TAG;
+ }
+
+ if (*mask & IATTR_FLAGS) {
+ struct proc_dir_entry *entry = PROC_I(in)->pde;
+ unsigned int iflags = PROC_I(in)->vx_flags;
+
+ iflags = (iflags & ~(*mask & IATTR_FLAGS))
+ | (*flags & IATTR_FLAGS);
+ PROC_I(in)->vx_flags = iflags;
+ if (entry)
+ entry->vx_flags = iflags;
+ }
+
+ if (*mask & (IATTR_BARRIER | IATTR_IUNLINK | IATTR_IMMUTABLE)) {
+ if (*mask & IATTR_IMMUTABLE) {
+ if (*flags & IATTR_IMMUTABLE)
+ in->i_flags |= S_IMMUTABLE;
+ else
+ in->i_flags &= ~S_IMMUTABLE;
+ }
+ if (*mask & IATTR_IUNLINK) {
+ if (*flags & IATTR_IUNLINK)
+ in->i_flags |= S_IUNLINK;
+ else
+ in->i_flags &= ~S_IUNLINK;
+ }
+ if (S_ISDIR(in->i_mode) && (*mask & IATTR_BARRIER)) {
+ if (*flags & IATTR_BARRIER)
+ in->i_flags |= S_BARRIER;
+ else
+ in->i_flags &= ~S_BARRIER;
+ }
+ if (in->i_op && in->i_op->sync_flags) {
+ error = in->i_op->sync_flags(in);
+ if (error)
+ goto out;
+ }
+ }
+
+ if (attr.ia_valid) {
+ if (in->i_op && in->i_op->setattr)
+ error = in->i_op->setattr(de, &attr);
+ else {
+ error = inode_change_ok(in, &attr);
+ if (!error)
+ error = inode_setattr(in, &attr);
+ }
+ }
+
+out:
+ mutex_unlock(&in->i_mutex);
+ return error;
+}
+
+int vc_set_iattr(void __user *data)
+{
+ struct nameidata nd;
+ struct vcmd_ctx_iattr_v1 vc_data;
+ int ret;
+
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = user_path_walk_link(vc_data.name, &nd);
+ if (!ret) {
+ ret = __vc_set_iattr(nd.path.dentry,
+ &vc_data.tag, &vc_data.flags, &vc_data.mask);
+ path_put(&nd.path);
+ }
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_set_iattr_x32(void __user *data)
+{
+ struct nameidata nd;
+ struct vcmd_ctx_iattr_v1_x32 vc_data;
+ int ret;
+
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = user_path_walk_link(compat_ptr(vc_data.name_ptr), &nd);
+ if (!ret) {
+ ret = __vc_set_iattr(nd.path.dentry,
+ &vc_data.tag, &vc_data.flags, &vc_data.mask);
+ path_put(&nd.path);
+ }
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+int vc_fset_iattr(uint32_t fd, void __user *data)
+{
+ struct file *filp;
+ struct vcmd_ctx_fiattr_v0 vc_data;
+ int ret;
+
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ filp = fget(fd);
+ if (!filp || !filp->f_dentry || !filp->f_dentry->d_inode)
+ return -EBADF;
+
+ ret = __vc_set_iattr(filp->f_dentry, &vc_data.tag,
+ &vc_data.flags, &vc_data.mask);
+
+ fput(filp);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return ret;
+}
+
+
+enum { Opt_notagcheck, Opt_tag, Opt_notag, Opt_tagid, Opt_err };
+
+static match_table_t tokens = {
+ {Opt_notagcheck, "notagcheck"},
+#ifdef CONFIG_PROPAGATE
+ {Opt_notag, "notag"},
+ {Opt_tag, "tag"},
+ {Opt_tagid, "tagid=%u"},
+#endif
+ {Opt_err, NULL}
+};
+
+
+static void __dx_parse_remove(char *string, char *opt)
+{
+ char *p = strstr(string, opt);
+ char *q = p;
+
+ if (p) {
+ while (*q != '\0' && *q != ',')
+ q++;
+ while (*q)
+ *p++ = *q++;
+ while (*p)
+ *p++ = '\0';
+ }
+}
+
+static inline
+int __dx_parse_tag(char *string, tag_t *tag, int remove)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int token, option = 0;
+
+ if (!string)
+ return 0;
+
+ token = match_token(string, tokens, args);
+
+ vxdprintk(VXD_CBIT(tag, 7),
+ "dx_parse_tag(»%s«): %d:#%d",
+ string, token, option);
+
+ switch (token) {
+ case Opt_tag:
+ if (tag)
+ *tag = 0;
+ if (remove)
+ __dx_parse_remove(string, "tag");
+ return MNT_TAGID;
+ case Opt_notag:
+ if (remove)
+ __dx_parse_remove(string, "notag");
+ return MNT_NOTAG;
+ case Opt_notagcheck:
+ if (remove)
+ __dx_parse_remove(string, "notagcheck");
+ return MNT_NOTAGCHECK;
+ case Opt_tagid:
+ if (tag && !match_int(args, &option))
+ *tag = option;
+ if (remove)
+ __dx_parse_remove(string, "tagid");
+ return MNT_TAGID;
+ }
+ return 0;
+}
+
+int dx_parse_tag(char *string, tag_t *tag, int remove)
+{
+ int retval, flags = 0;
+
+ while ((retval = __dx_parse_tag(string, tag, remove)))
+ flags |= retval;
+ return flags;
+}
+
+#ifdef CONFIG_PROPAGATE
+
+void __dx_propagate_tag(struct nameidata *nd, struct inode *inode)
+{
+ tag_t new_tag = 0;
+ struct vfsmount *mnt;
+ int propagate;
+
+ if (!nd)
+ return;
+ mnt = nd->path.mnt;
+ if (!mnt)
+ return;
+
+ propagate = (mnt->mnt_flags & MNT_TAGID);
+ if (propagate)
+ new_tag = mnt->mnt_tag;
+
+ vxdprintk(VXD_CBIT(tag, 7),
+ "dx_propagate_tag(%p[#%lu.%d]): %d,%d",
+ inode, inode->i_ino, inode->i_tag,
+ new_tag, (propagate) ? 1 : 0);
+
+ if (propagate)
+ inode->i_tag = new_tag;
+}
+
+#include <linux/module.h>
+
+EXPORT_SYMBOL_GPL(__dx_propagate_tag);
+
+#endif /* CONFIG_PROPAGATE */
+
--- a/kernel/vserver/Kconfig 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/Kconfig 2008-05-21 15:02:48.000000000 -0400
@@ -0,0 +1,252 @@
+#
+# Linux VServer configuration
+#
+
+menu "Linux VServer"
+
+config VSERVER_AUTO_LBACK
+ bool "Automatically Assign Loopback IP"
+ default y
+ help
+ Automatically assign a guest specific loopback
+ IP and add it to the kernel network stack on
+ startup.
+
+config VSERVER_AUTO_SINGLE
+ bool "Automatic Single IP Special Casing"
+ depends on EXPERIMENTAL
+ default y
+ help
+ This allows network contexts with a single IP to
+ automatically remap 0.0.0.0 bindings to that IP,
+ avoiding further network checks and improving
+ performance.
+
+ (note: such guests do not allow changing the ip
+ on the fly and do not show loopback addresses)
+
+config VSERVER_COWBL
+ bool "Enable COW Immutable Link Breaking"
+ default y
+ help
+ This enables the COW (Copy-On-Write) link break code.
+ It allows you to treat unified files like normal files
+ when writing to them (which will implicitly break the
+ link and create a copy of the unified file)
+
+config VSERVER_VTIME
+ bool "Enable Virtualized Guest Time"
+ depends on EXPERIMENTAL
+ default n
+ help
+ This enables per guest time offsets to allow for
+ adjusting the system clock individually per guest.
+ This adds some overhead to the time functions and
+ therefore should not be enabled without good reason.
+
+config VSERVER_DEVICE
+ bool "Enable Guest Device Mapping"
+ depends on EXPERIMENTAL
+ default n
+ help
+ This enables generic device remapping.
+
+config VSERVER_PROC_SECURE
+ bool "Enable Proc Security"
+ depends on PROC_FS
+ default y
+ help
+ This configures ProcFS security to initially hide
+ non-process entries for all contexts except the main and
+ spectator context (i.e. for all guests), which is a secure
+ default.
+
+ (note: on 1.2x the entries were visible by default)
+
+config VSERVER_HARDCPU
+ bool "Enable Hard CPU Limits"
+ default y
+ help
+ Activate the Hard CPU Limits
+
+ This will compile in code that allows the Token Bucket
+ Scheduler to put processes on hold when a context's
+ tokens are depleted (provided that its per-context
+ sched_hard flag is set).
+
+ Processes belonging to that context will not be able
+ to consume CPU resources again until a per-context
+ configured minimum of tokens has been reached.
+
+config VSERVER_IDLETIME
+ bool "Avoid idle CPUs by skipping Time"
+ depends on VSERVER_HARDCPU
+ default y
+ help
+ This option allows the scheduler to artificially
+ advance time (per cpu) when otherwise the idle
+ task would be scheduled, thus keeping the cpu
+ busy and sharing the available resources among
+ certain contexts.
+
+config VSERVER_IDLELIMIT
+ bool "Limit the IDLE task"
+ depends on VSERVER_HARDCPU
+ default n
+ help
+ Limit the idle slices, so the next context
+ will be scheduled as soon as possible.
+
+ This might improve interactivity and latency, but
+ will also marginally increase scheduling overhead.
+
+choice
+ prompt "Persistent Inode Tagging"
+ default TAGGING_ID24
+ help
+ This adds persistent context information to filesystems
+ mounted with the tagxid option. Tagging is a requirement
+ for per-context disk limits and per-context quota.
+
+
+config TAGGING_NONE
+ bool "Disabled"
+ help
+ do not store per-context information in inodes.
+
+config TAGGING_UID16
+ bool "UID16/GID32"
+ help
+ reduces UID to 16 bit, but leaves GID at 32 bit.
+
+config TAGGING_GID16
+ bool "UID32/GID16"
+ help
+ reduces GID to 16 bit, but leaves UID at 32 bit.
+
+config TAGGING_ID24
+ bool "UID24/GID24"
+ help
+ uses the upper 8bit from UID and GID for XID tagging
+ which leaves 24bit for UID/GID each, which should be
+ more than sufficient for normal use.
+
+config TAGGING_INTERN
+ bool "UID32/GID32"
+ help
+ this uses otherwise reserved inode fields in the on
+ disk representation, which limits the use to a few
+ filesystems (currently ext2 and ext3)
+
+endchoice
+
+config TAG_NFSD
+ bool "Tag NFSD User Auth and Files"
+ default n
+ help
+ Enable this if you do want the in-kernel NFS
+ Server to use the tagging specified above.
+ (will require patched clients too)
+
+config VSERVER_PRIVACY
+ bool "Honor Privacy Aspects of Guests"
+ default n
+ help
+ When enabled, most context checks will disallow
+ access to structures assigned to a specific context,
+ like ptys or loop devices.
+
+config VSERVER_CONTEXTS
+ int "Maximum number of Contexts (1-65533)" if EMBEDDED
+ range 1 65533
+ default "768" if 64BIT
+ default "256"
+ help
+ This setting will optimize certain data structures
+ and memory allocations according to the expected
+ maximum.
+
+ note: this is not a strict upper limit.
+
+config VSERVER_WARN
+ bool "VServer Warnings"
+ default y
+ help
+ This enables various runtime warnings, which will
+ notify about potential manipulation attempts or
+ resource shortage. It is generally considered to
+ be a good idea to have that enabled.
+
+config VSERVER_DEBUG
+ bool "VServer Debugging Code"
+ default n
+ help
+ Set this to yes if you want to be able to activate
+ debugging output at runtime. It adds a very small
+ overhead to all vserver related functions and
+ increases the kernel size by about 20k.
+
+config VSERVER_HISTORY
+ bool "VServer History Tracing"
+ depends on VSERVER_DEBUG
+ default n
+ help
+ Set this to yes if you want to record the history of
+ linux-vserver activities, so they can be replayed in
+ the event of a kernel panic or oops.
+
+config VSERVER_HISTORY_SIZE
+ int "Per-CPU History Size (32-65536)"
+ depends on VSERVER_HISTORY
+ range 32 65536
+ default 64
+ help
+ This allows you to specify the number of entries in
+ the per-CPU history buffer.
+
+config VSERVER_MONITOR
+ bool "VServer Scheduling Monitor"
+ depends on VSERVER_DISABLED
+ default n
+ help
+ Set this to yes if you want to record the scheduling
+ decisions, so that they can be relayed to userspace
+ for detailed analysis.
+
+config VSERVER_MONITOR_SIZE
+ int "Per-CPU Monitor Queue Size (32-65536)"
+ depends on VSERVER_MONITOR
+ range 32 65536
+ default 1024
+ help
+ This allows you to specify the number of entries in
+ the per-CPU scheduling monitor buffer.
+
+config VSERVER_MONITOR_SYNC
+ int "Per-CPU Monitor Sync Interval (0-65536)"
+ depends on VSERVER_MONITOR
+ range 0 65536
+ default 256
+ help
+ This allows you to specify the interval in ticks
+ when a time sync entry is inserted.
+
+endmenu
+
+
+config VSERVER
+ bool
+ default y
+ select NAMESPACES
+ select UTS_NS
+ select IPC_NS
+ select PID_NS
+ select USER_NS
+ select SYSVIPC
+
+config VSERVER_SECURITY
+ bool
+ depends on SECURITY
+ default y
+ select SECURITY_CAPABILITIES
+
--- a/kernel/vserver/limit.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/limit.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,319 @@
+/*
+ * linux/kernel/vserver/limit.c
+ *
+ * Virtual Server: Context Limits
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 broken out from vcontext V0.05
+ * V0.02 changed vcmds to vxi arg
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/module.h>
+#include <linux/vs_limit.h>
+#include <linux/vserver/limit.h>
+#include <linux/vserver/limit_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+const char *vlimit_name[NUM_LIMITS] = {
+ [RLIMIT_CPU] = "CPU",
+ [RLIMIT_RSS] = "RSS",
+ [RLIMIT_NPROC] = "NPROC",
+ [RLIMIT_NOFILE] = "NOFILE",
+ [RLIMIT_MEMLOCK] = "VML",
+ [RLIMIT_AS] = "VM",
+ [RLIMIT_LOCKS] = "LOCKS",
+ [RLIMIT_SIGPENDING] = "SIGP",
+ [RLIMIT_MSGQUEUE] = "MSGQ",
+
+ [VLIMIT_NSOCK] = "NSOCK",
+ [VLIMIT_OPENFD] = "OPENFD",
+ [VLIMIT_ANON] = "ANON",
+ [VLIMIT_SHMEM] = "SHMEM",
+ [VLIMIT_DENTRY] = "DENTRY",
+};
+
+EXPORT_SYMBOL_GPL(vlimit_name);
+
+#define MASK_ENTRY(x) (1 << (x))
+
+const struct vcmd_ctx_rlimit_mask_v0 vlimit_mask = {
+ /* minimum */
+ 0
+ , /* softlimit */
+ MASK_ENTRY( RLIMIT_RSS ) |
+ MASK_ENTRY( VLIMIT_ANON ) |
+ 0
+ , /* maximum */
+ MASK_ENTRY( RLIMIT_RSS ) |
+ MASK_ENTRY( RLIMIT_NPROC ) |
+ MASK_ENTRY( RLIMIT_NOFILE ) |
+ MASK_ENTRY( RLIMIT_MEMLOCK ) |
+ MASK_ENTRY( RLIMIT_AS ) |
+ MASK_ENTRY( RLIMIT_LOCKS ) |
+ MASK_ENTRY( RLIMIT_MSGQUEUE ) |
+
+ MASK_ENTRY( VLIMIT_NSOCK ) |
+ MASK_ENTRY( VLIMIT_OPENFD ) |
+ MASK_ENTRY( VLIMIT_ANON ) |
+ MASK_ENTRY( VLIMIT_SHMEM ) |
+ MASK_ENTRY( VLIMIT_DENTRY ) |
+ 0
+};
+ /* accounting only */
+uint32_t account_mask =
+ MASK_ENTRY( VLIMIT_SEMARY ) |
+ MASK_ENTRY( VLIMIT_NSEMS ) |
+ MASK_ENTRY( VLIMIT_MAPPED ) |
+ 0;
+
+
+static int is_valid_vlimit(int id)
+{
+ uint32_t mask = vlimit_mask.minimum |
+ vlimit_mask.softlimit | vlimit_mask.maximum;
+ return mask & (1 << id);
+}
+
+static int is_accounted_vlimit(int id)
+{
+ if (is_valid_vlimit(id))
+ return 1;
+ return account_mask & (1 << id);
+}
+
+
+static inline uint64_t vc_get_soft(struct vx_info *vxi, int id)
+{
+ rlim_t limit = __rlim_soft(&vxi->limit, id);
+ return VX_VLIM(limit);
+}
+
+static inline uint64_t vc_get_hard(struct vx_info *vxi, int id)
+{
+ rlim_t limit = __rlim_hard(&vxi->limit, id);
+ return VX_VLIM(limit);
+}
+
+static int do_get_rlimit(struct vx_info *vxi, uint32_t id,
+ uint64_t *minimum, uint64_t *softlimit, uint64_t *maximum)
+{
+ if (!is_valid_vlimit(id))
+ return -EINVAL;
+
+ if (minimum)
+ *minimum = CRLIM_UNSET;
+ if (softlimit)
+ *softlimit = vc_get_soft(vxi, id);
+ if (maximum)
+ *maximum = vc_get_hard(vxi, id);
+ return 0;
+}
+
+int vc_get_rlimit(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_rlimit_v0 vc_data;
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_get_rlimit(vxi, vc_data.id,
+ &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+static int do_set_rlimit(struct vx_info *vxi, uint32_t id,
+ uint64_t minimum, uint64_t softlimit, uint64_t maximum)
+{
+ if (!is_valid_vlimit(id))
+ return -EINVAL;
+
+ if (maximum != CRLIM_KEEP)
+ __rlim_hard(&vxi->limit, id) = VX_RLIM(maximum);
+ if (softlimit != CRLIM_KEEP)
+ __rlim_soft(&vxi->limit, id) = VX_RLIM(softlimit);
+
+ /* clamp soft limit */
+ if (__rlim_soft(&vxi->limit, id) > __rlim_hard(&vxi->limit, id))
+ __rlim_soft(&vxi->limit, id) = __rlim_hard(&vxi->limit, id);
+
+ return 0;
+}
+
+int vc_set_rlimit(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_rlimit_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_rlimit(vxi, vc_data.id,
+ vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+#ifdef CONFIG_IA32_EMULATION
+
+int vc_set_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_rlimit_v0_x32 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_rlimit(vxi, vc_data.id,
+ vc_data.minimum, vc_data.softlimit, vc_data.maximum);
+}
+
+int vc_get_rlimit_x32(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_rlimit_v0_x32 vc_data;
+ int ret;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_get_rlimit(vxi, vc_data.id,
+ &vc_data.minimum, &vc_data.softlimit, &vc_data.maximum);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+#endif /* CONFIG_IA32_EMULATION */
+
+
+int vc_get_rlimit_mask(uint32_t id, void __user *data)
+{
+ if (copy_to_user(data, &vlimit_mask, sizeof(vlimit_mask)))
+ return -EFAULT;
+ return 0;
+}
+
+
+static inline void vx_reset_minmax(struct _vx_limit *limit)
+{
+ rlim_t value;
+ int lim;
+
+ for (lim = 0; lim < NUM_LIMITS; lim++) {
+ value = __rlim_get(limit, lim);
+ __rlim_rmax(limit, lim) = value;
+ __rlim_rmin(limit, lim) = value;
+ }
+}
+
+
+int vc_reset_minmax(struct vx_info *vxi, void __user *data)
+{
+ vx_reset_minmax(&vxi->limit);
+ return 0;
+}
+
+
+int vc_rlimit_stat(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_rlimit_stat_v0 vc_data;
+ struct _vx_limit *limit = &vxi->limit;
+ int id;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ id = vc_data.id;
+ if (!is_accounted_vlimit(id))
+ return -EINVAL;
+
+ vx_limit_fixup(limit, id);
+ vc_data.hits = atomic_read(&__rlim_lhit(limit, id));
+ vc_data.value = __rlim_get(limit, id);
+ vc_data.minimum = __rlim_rmin(limit, id);
+ vc_data.maximum = __rlim_rmax(limit, id);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+void vx_vsi_meminfo(struct sysinfo *val)
+{
+ struct vx_info *vxi = current->vx_info;
+ unsigned long totalram, freeram;
+ rlim_t v;
+
+ /* we blindly accept the max */
+ v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+ totalram = (v != RLIM_INFINITY) ? v : val->totalram;
+
+ /* total minus used equals free */
+ v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+ freeram = (v < totalram) ? totalram - v : 0;
+
+ val->totalram = totalram;
+ val->freeram = freeram;
+ val->bufferram = 0;
+ val->totalhigh = 0;
+ val->freehigh = 0;
+ return;
+}
+
+void vx_vsi_swapinfo(struct sysinfo *val)
+{
+ struct vx_info *vxi = current->vx_info;
+ unsigned long totalswap, freeswap;
+ rlim_t v, w;
+
+ v = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+ if (v == RLIM_INFINITY) {
+ val->freeswap = val->totalswap;
+ return;
+ }
+
+ /* we blindly accept the max */
+ w = __rlim_hard(&vxi->limit, RLIMIT_RSS);
+ totalswap = (w != RLIM_INFINITY) ? (w - v) : val->totalswap;
+
+ /* currently 'used' swap */
+ w = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+ w -= (w > v) ? v : w;
+
+ /* total minus used equals free */
+ freeswap = (w < totalswap) ? totalswap - w : 0;
+
+ val->totalswap = totalswap;
+ val->freeswap = freeswap;
+ return;
+}
+
+
+unsigned long vx_badness(struct task_struct *task, struct mm_struct *mm)
+{
+ struct vx_info *vxi = mm->mm_vx_info;
+ unsigned long points;
+ rlim_t v, w;
+
+ if (!vxi)
+ return 0;
+
+ points = vxi->vx_badness_bias;
+
+ v = __vx_cres_array_fixup(&vxi->limit, VLA_RSS);
+ w = __rlim_soft(&vxi->limit, RLIMIT_RSS);
+ points += (v > w) ? (v - w) : 0;
+
+ return points;
+}
+
--- a/kernel/vserver/limit_init.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/limit_init.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,33 @@
+
+
+static inline void vx_info_init_limit(struct _vx_limit *limit)
+{
+ int lim;
+
+ for (lim = 0; lim < NUM_LIMITS; lim++) {
+ __rlim_soft(limit, lim) = RLIM_INFINITY;
+ __rlim_hard(limit, lim) = RLIM_INFINITY;
+ __rlim_set(limit, lim, 0);
+ atomic_set(&__rlim_lhit(limit, lim), 0);
+ __rlim_rmin(limit, lim) = 0;
+ __rlim_rmax(limit, lim) = 0;
+ }
+}
+
+static inline void vx_info_exit_limit(struct _vx_limit *limit)
+{
+#ifdef CONFIG_VSERVER_WARN
+ rlim_t value;
+ int lim;
+
+ for (lim = 0; lim < NUM_LIMITS; lim++) {
+ if ((1 << lim) & VLIM_NOCHECK)
+ continue;
+ value = __rlim_get(limit, lim);
+ vxwprintk_xid(value,
+ "!!! limit: %p[%s,%d] = %ld on exit.",
+ limit, vlimit_name[lim], lim, (long)value);
+ }
+#endif
+}
+
--- a/kernel/vserver/limit_proc.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/limit_proc.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef _VX_LIMIT_PROC_H
+#define _VX_LIMIT_PROC_H
+
+#include <linux/vserver/limit_int.h>
+
+
+#define VX_LIMIT_FMT ":\t%8ld\t%8ld/%8ld\t%8lld/%8lld\t%6d\n"
+#define VX_LIMIT_TOP \
+ "Limit\t current\t min/max\t\t soft/hard\t\thits\n"
+
+#define VX_LIMIT_ARG(r) \
+ (unsigned long)__rlim_get(limit, r), \
+ (unsigned long)__rlim_rmin(limit, r), \
+ (unsigned long)__rlim_rmax(limit, r), \
+ VX_VLIM(__rlim_soft(limit, r)), \
+ VX_VLIM(__rlim_hard(limit, r)), \
+ atomic_read(&__rlim_lhit(limit, r))
+
+static inline int vx_info_proc_limit(struct _vx_limit *limit, char *buffer)
+{
+ vx_limit_fixup(limit, -1);
+ return sprintf(buffer, VX_LIMIT_TOP
+ "PROC" VX_LIMIT_FMT
+ "VM" VX_LIMIT_FMT
+ "VML" VX_LIMIT_FMT
+ "RSS" VX_LIMIT_FMT
+ "ANON" VX_LIMIT_FMT
+ "RMAP" VX_LIMIT_FMT
+ "FILES" VX_LIMIT_FMT
+ "OFD" VX_LIMIT_FMT
+ "LOCKS" VX_LIMIT_FMT
+ "SOCK" VX_LIMIT_FMT
+ "MSGQ" VX_LIMIT_FMT
+ "SHM" VX_LIMIT_FMT
+ "SEMA" VX_LIMIT_FMT
+ "SEMS" VX_LIMIT_FMT
+ "DENT" VX_LIMIT_FMT,
+ VX_LIMIT_ARG(RLIMIT_NPROC),
+ VX_LIMIT_ARG(RLIMIT_AS),
+ VX_LIMIT_ARG(RLIMIT_MEMLOCK),
+ VX_LIMIT_ARG(RLIMIT_RSS),
+ VX_LIMIT_ARG(VLIMIT_ANON),
+ VX_LIMIT_ARG(VLIMIT_MAPPED),
+ VX_LIMIT_ARG(RLIMIT_NOFILE),
+ VX_LIMIT_ARG(VLIMIT_OPENFD),
+ VX_LIMIT_ARG(RLIMIT_LOCKS),
+ VX_LIMIT_ARG(VLIMIT_NSOCK),
+ VX_LIMIT_ARG(RLIMIT_MSGQUEUE),
+ VX_LIMIT_ARG(VLIMIT_SHMEM),
+ VX_LIMIT_ARG(VLIMIT_SEMARY),
+ VX_LIMIT_ARG(VLIMIT_NSEMS),
+ VX_LIMIT_ARG(VLIMIT_DENTRY));
+}
+
+#endif /* _VX_LIMIT_PROC_H */
+
+
--- a/kernel/vserver/Makefile 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/Makefile 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,18 @@
+#
+# Makefile for the Linux vserver routines.
+#
+
+
+obj-y += vserver.o
+
+vserver-y := switch.o context.o space.o sched.o network.o inode.o \
+ limit.o cvirt.o cacct.o signal.o helper.o init.o \
+ dlimit.o tag.o
+
+vserver-$(CONFIG_INET) += inet.o
+vserver-$(CONFIG_PROC_FS) += proc.o
+vserver-$(CONFIG_VSERVER_DEBUG) += sysctl.o debug.o
+vserver-$(CONFIG_VSERVER_HISTORY) += history.o
+vserver-$(CONFIG_VSERVER_MONITOR) += monitor.o
+vserver-$(CONFIG_VSERVER_DEVICE) += device.o
+
--- a/kernel/vserver/monitor.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/monitor.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,138 @@
+/*
+ * kernel/vserver/monitor.c
+ *
+ * Virtual Context Scheduler Monitor
+ *
+ * Copyright (C) 2006-2007 Herbert Pötzl
+ *
+ * V0.01 basic design
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <asm/uaccess.h>
+#include <asm/atomic.h>
+
+#include <linux/vserver/monitor.h>
+#include <linux/vserver/debug_cmd.h>
+
+
+#ifdef CONFIG_VSERVER_MONITOR
+#define VXM_SIZE CONFIG_VSERVER_MONITOR_SIZE
+#else
+#define VXM_SIZE 64
+#endif
+
+struct _vx_monitor {
+ unsigned int counter;
+
+ struct _vx_mon_entry entry[VXM_SIZE+1];
+};
+
+
+DEFINE_PER_CPU(struct _vx_monitor, vx_monitor_buffer);
+
+unsigned volatile int vxm_active = 1;
+
+static atomic_t sequence = ATOMIC_INIT(0);
+
+
+/* vxm_advance()
+
+ * requires disabled preemption */
+
+struct _vx_mon_entry *vxm_advance(int cpu)
+{
+ struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
+ struct _vx_mon_entry *entry;
+ unsigned int index;
+
+ index = vxm_active ? (mon->counter++ % VXM_SIZE) : VXM_SIZE;
+ entry = &mon->entry[index];
+
+ entry->ev.seq = atomic_inc_return(&sequence);
+ entry->ev.jif = jiffies;
+ return entry;
+}
+
+EXPORT_SYMBOL_GPL(vxm_advance);
+
+
+int do_read_monitor(struct __user _vx_mon_entry *data,
+ int cpu, uint32_t *index, uint32_t *count)
+{
+ int pos, ret = 0;
+ struct _vx_monitor *mon = &per_cpu(vx_monitor_buffer, cpu);
+ int end = mon->counter;
+ int start = end - VXM_SIZE + 2;
+ int idx = *index;
+
+ /* special case: get current pos */
+ if (!*count) {
+ *index = end;
+ return 0;
+ }
+
+ /* have we lost some data? */
+ if (idx < start)
+ idx = start;
+
+ for (pos = 0; (pos < *count) && (idx < end); pos++, idx++) {
+ struct _vx_mon_entry *entry =
+ &mon->entry[idx % VXM_SIZE];
+
+ /* send entry to userspace */
+ ret = copy_to_user(&data[pos], entry, sizeof(*entry));
+ if (ret)
+ break;
+ }
+ /* save new index and count */
+ *index = idx;
+ *count = pos;
+ return ret ? ret : (*index < end);
+}
+
+int vc_read_monitor(uint32_t id, void __user *data)
+{
+ struct vcmd_read_monitor_v0 vc_data;
+ int ret;
+
+ if (id >= NR_CPUS)
+ return -EINVAL;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_read_monitor((struct __user _vx_mon_entry *)vc_data.data,
+ id, &vc_data.index, &vc_data.count);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+int vc_read_monitor_x32(uint32_t id, void __user *data)
+{
+ struct vcmd_read_monitor_v0_x32 vc_data;
+ int ret;
+
+ if (id >= NR_CPUS)
+ return -EINVAL;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ ret = do_read_monitor((struct __user _vx_mon_entry *)
+ compat_ptr(vc_data.data_ptr),
+ id, &vc_data.index, &vc_data.count);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
--- a/kernel/vserver/network.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/network.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,864 @@
+/*
+ * linux/kernel/vserver/network.c
+ *
+ * Virtual Server: Network Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ * V0.01 broken out from vcontext V0.05
+ * V0.02 cleaned up implementation
+ * V0.03 added equiv nx commands
+ * V0.04 switch to RCU based hash
+ * V0.05 and back to locking again
+ * V0.06 changed vcmds to nxi arg
+ * V0.07 have __create claim() the nxi
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/rcupdate.h>
+
+#include <linux/vs_network.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/network_cmd.h>
+
+
+atomic_t nx_global_ctotal = ATOMIC_INIT(0);
+atomic_t nx_global_cactive = ATOMIC_INIT(0);
+
+static struct kmem_cache *nx_addr_v4_cachep = NULL;
+static struct kmem_cache *nx_addr_v6_cachep = NULL;
+
+
+static int __init init_network(void)
+{
+ nx_addr_v4_cachep = kmem_cache_create("nx_v4_addr_cache",
+ sizeof(struct nx_addr_v4), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ nx_addr_v6_cachep = kmem_cache_create("nx_v6_addr_cache",
+ sizeof(struct nx_addr_v6), 0,
+ SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+ return 0;
+}
+
+
+/* __alloc_nx_addr_v4() */
+
+static inline struct nx_addr_v4 *__alloc_nx_addr_v4(void)
+{
+ struct nx_addr_v4 *nxa = kmem_cache_alloc(
+ nx_addr_v4_cachep, GFP_KERNEL);
+
+ if (!IS_ERR(nxa))
+ memset(nxa, 0, sizeof(*nxa));
+ return nxa;
+}
+
+/* __dealloc_nx_addr_v4() */
+
+static inline void __dealloc_nx_addr_v4(struct nx_addr_v4 *nxa)
+{
+ kmem_cache_free(nx_addr_v4_cachep, nxa);
+}
+
+/* __dealloc_nx_addr_v4_all() */
+
+static inline void __dealloc_nx_addr_v4_all(struct nx_addr_v4 *nxa)
+{
+ while (nxa) {
+ struct nx_addr_v4 *next = nxa->next;
+
+ __dealloc_nx_addr_v4(nxa);
+ nxa = next;
+ }
+}
+
+
+#ifdef CONFIG_IPV6
+
+/* __alloc_nx_addr_v6() */
+
+static inline struct nx_addr_v6 *__alloc_nx_addr_v6(void)
+{
+ struct nx_addr_v6 *nxa = kmem_cache_alloc(
+ nx_addr_v6_cachep, GFP_KERNEL);
+
+ if (!IS_ERR(nxa))
+ memset(nxa, 0, sizeof(*nxa));
+ return nxa;
+}
+
+/* __dealloc_nx_addr_v6() */
+
+static inline void __dealloc_nx_addr_v6(struct nx_addr_v6 *nxa)
+{
+ kmem_cache_free(nx_addr_v6_cachep, nxa);
+}
+
+/* __dealloc_nx_addr_v6_all() */
+
+static inline void __dealloc_nx_addr_v6_all(struct nx_addr_v6 *nxa)
+{
+ while (nxa) {
+ struct nx_addr_v6 *next = nxa->next;
+
+ __dealloc_nx_addr_v6(nxa);
+ nxa = next;
+ }
+}
+
+#endif /* CONFIG_IPV6 */
+
+/* __alloc_nx_info()
+
+ * allocate an initialized nx_info struct
+ * doesn't make it visible (hash) */
+
+static struct nx_info *__alloc_nx_info(nid_t nid)
+{
+ struct nx_info *new = NULL;
+
+ vxdprintk(VXD_CBIT(nid, 1), "alloc_nx_info(%d)*", nid);
+
+ /* would this benefit from a slab cache? */
+ new = kmalloc(sizeof(struct nx_info), GFP_KERNEL);
+ if (!new)
+ return 0;
+
+ memset(new, 0, sizeof(struct nx_info));
+ new->nx_id = nid;
+ INIT_HLIST_NODE(&new->nx_hlist);
+ atomic_set(&new->nx_usecnt, 0);
+ atomic_set(&new->nx_tasks, 0);
+ new->nx_state = 0;
+
+ new->nx_flags = NXF_INIT_SET;
+
+ /* rest of init goes here */
+
+ new->v4_lback.s_addr = htonl(INADDR_LOOPBACK);
+ new->v4_bcast.s_addr = htonl(INADDR_BROADCAST);
+
+ vxdprintk(VXD_CBIT(nid, 0),
+ "alloc_nx_info(%d) = %p", nid, new);
+ atomic_inc(&nx_global_ctotal);
+ return new;
+}
+
+/* __dealloc_nx_info()
+
+ * final disposal of nx_info */
+
+static void __dealloc_nx_info(struct nx_info *nxi)
+{
+ vxdprintk(VXD_CBIT(nid, 0),
+ "dealloc_nx_info(%p)", nxi);
+
+ nxi->nx_hlist.next = LIST_POISON1;
+ nxi->nx_id = -1;
+
+ BUG_ON(atomic_read(&nxi->nx_usecnt));
+ BUG_ON(atomic_read(&nxi->nx_tasks));
+
+ __dealloc_nx_addr_v4_all(nxi->v4.next);
+
+ nxi->nx_state |= NXS_RELEASED;
+ kfree(nxi);
+ atomic_dec(&nx_global_ctotal);
+}
+
+static void __shutdown_nx_info(struct nx_info *nxi)
+{
+ nxi->nx_state |= NXS_SHUTDOWN;
+ vs_net_change(nxi, VSC_NETDOWN);
+}
+
+/* exported stuff */
+
+void free_nx_info(struct nx_info *nxi)
+{
+ /* context shutdown is mandatory */
+ BUG_ON(nxi->nx_state != NXS_SHUTDOWN);
+
+ /* context must not be hashed */
+ BUG_ON(nxi->nx_state & NXS_HASHED);
+
+ BUG_ON(atomic_read(&nxi->nx_usecnt));
+ BUG_ON(atomic_read(&nxi->nx_tasks));
+
+ __dealloc_nx_info(nxi);
+}
+
+
+void __nx_set_lback(struct nx_info *nxi)
+{
+ int nid = nxi->nx_id;
+ __be32 lback = htonl(INADDR_LOOPBACK ^ ((nid & 0xFFFF) << 8));
+
+ nxi->v4_lback.s_addr = lback;
+}
+
+extern int __nx_inet_add_lback(__be32 addr);
+extern int __nx_inet_del_lback(__be32 addr);
+
+
+/* hash table for nx_info hash */
+
+#define NX_HASH_SIZE 13
+
+struct hlist_head nx_info_hash[NX_HASH_SIZE];
+
+static spinlock_t nx_info_hash_lock = SPIN_LOCK_UNLOCKED;
+
+
+static inline unsigned int __hashval(nid_t nid)
+{
+ return (nid % NX_HASH_SIZE);
+}
+
+
+
+/* __hash_nx_info()
+
+ * add the nxi to the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __hash_nx_info(struct nx_info *nxi)
+{
+ struct hlist_head *head;
+
+ vxd_assert_lock(&nx_info_hash_lock);
+ vxdprintk(VXD_CBIT(nid, 4),
+ "__hash_nx_info: %p[#%d]", nxi, nxi->nx_id);
+
+ /* context must not be hashed */
+ BUG_ON(nx_info_state(nxi, NXS_HASHED));
+
+ nxi->nx_state |= NXS_HASHED;
+ head = &nx_info_hash[__hashval(nxi->nx_id)];
+ hlist_add_head(&nxi->nx_hlist, head);
+ atomic_inc(&nx_global_cactive);
+}
+
+/* __unhash_nx_info()
+
+ * remove the nxi from the global hash table
+ * requires the hash_lock to be held */
+
+static inline void __unhash_nx_info(struct nx_info *nxi)
+{
+ vxd_assert_lock(&nx_info_hash_lock);
+ vxdprintk(VXD_CBIT(nid, 4),
+ "__unhash_nx_info: %p[#%d.%d.%d]", nxi, nxi->nx_id,
+ atomic_read(&nxi->nx_usecnt), atomic_read(&nxi->nx_tasks));
+
+ /* context must be hashed */
+ BUG_ON(!nx_info_state(nxi, NXS_HASHED));
+ /* but without tasks */
+ BUG_ON(atomic_read(&nxi->nx_tasks));
+
+ nxi->nx_state &= ~NXS_HASHED;
+ hlist_del(&nxi->nx_hlist);
+ atomic_dec(&nx_global_cactive);
+}
+
+
+/* __lookup_nx_info()
+
+ * requires the hash_lock to be held
+ * doesn't increment the nx_refcnt */
+
+static inline struct nx_info *__lookup_nx_info(nid_t nid)
+{
+ struct hlist_head *head = &nx_info_hash[__hashval(nid)];
+ struct hlist_node *pos;
+ struct nx_info *nxi;
+
+ vxd_assert_lock(&nx_info_hash_lock);
+ hlist_for_each(pos, head) {
+ nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+
+ if (nxi->nx_id == nid)
+ goto found;
+ }
+ nxi = NULL;
+found:
+ vxdprintk(VXD_CBIT(nid, 0),
+ "__lookup_nx_info(#%u): %p[#%u]",
+ nid, nxi, nxi ? nxi->nx_id : 0);
+ return nxi;
+}
+
+
+/* __create_nx_info()
+
+ * create the requested context
+ * get(), claim() and hash it */
+
+static struct nx_info *__create_nx_info(int id)
+{
+ struct nx_info *new, *nxi = NULL;
+
+ vxdprintk(VXD_CBIT(nid, 1), "create_nx_info(%d)*", id);
+
+ if (!(new = __alloc_nx_info(id)))
+ return ERR_PTR(-ENOMEM);
+
+ /* required to make dynamic xids unique */
+ spin_lock(&nx_info_hash_lock);
+
+ /* static context requested */
+ if ((nxi = __lookup_nx_info(id))) {
+ vxdprintk(VXD_CBIT(nid, 0),
+ "create_nx_info(%d) = %p (already there)", id, nxi);
+ if (nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+ nxi = ERR_PTR(-EBUSY);
+ else
+ nxi = ERR_PTR(-EEXIST);
+ goto out_unlock;
+ }
+ /* new context */
+ vxdprintk(VXD_CBIT(nid, 0),
+ "create_nx_info(%d) = %p (new)", id, new);
+ claim_nx_info(new, NULL);
+ __nx_set_lback(new);
+ __hash_nx_info(get_nx_info(new));
+ nxi = new, new = NULL;
+
+out_unlock:
+ spin_unlock(&nx_info_hash_lock);
+ if (new)
+ __dealloc_nx_info(new);
+ return nxi;
+}
+
+
+
+/* exported stuff */
+
+
+void unhash_nx_info(struct nx_info *nxi)
+{
+ __shutdown_nx_info(nxi);
+ spin_lock(&nx_info_hash_lock);
+ __unhash_nx_info(nxi);
+ spin_unlock(&nx_info_hash_lock);
+}
+
+/* lookup_nx_info()
+
+ * search for a nx_info and get() it
+ * negative id means current */
+
+struct nx_info *lookup_nx_info(int id)
+{
+ struct nx_info *nxi = NULL;
+
+ if (id < 0) {
+ nxi = get_nx_info(current->nx_info);
+ } else if (id > 1) {
+ spin_lock(&nx_info_hash_lock);
+ nxi = get_nx_info(__lookup_nx_info(id));
+ spin_unlock(&nx_info_hash_lock);
+ }
+ return nxi;
+}
+
+/* nid_is_hashed()
+
+ * verify that nid is still hashed */
+
+int nid_is_hashed(nid_t nid)
+{
+ int hashed;
+
+ spin_lock(&nx_info_hash_lock);
+ hashed = (__lookup_nx_info(nid) != NULL);
+ spin_unlock(&nx_info_hash_lock);
+ return hashed;
+}
+
+
+#ifdef CONFIG_PROC_FS
+
+/* get_nid_list()
+
+ * get a subset of hashed nids for proc
+ * assumes size is at least one */
+
+int get_nid_list(int index, unsigned int *nids, int size)
+{
+ int hindex, nr_nids = 0;
+
+ /* only show current and children */
+ if (!nx_check(0, VS_ADMIN | VS_WATCH)) {
+ if (index > 0)
+ return 0;
+ nids[nr_nids] = nx_current_nid();
+ return 1;
+ }
+
+ for (hindex = 0; hindex < NX_HASH_SIZE; hindex++) {
+ struct hlist_head *head = &nx_info_hash[hindex];
+ struct hlist_node *pos;
+
+ spin_lock(&nx_info_hash_lock);
+ hlist_for_each(pos, head) {
+ struct nx_info *nxi;
+
+ if (--index > 0)
+ continue;
+
+ nxi = hlist_entry(pos, struct nx_info, nx_hlist);
+ nids[nr_nids] = nxi->nx_id;
+ if (++nr_nids >= size) {
+ spin_unlock(&nx_info_hash_lock);
+ goto out;
+ }
+ }
+ /* keep the lock time short */
+ spin_unlock(&nx_info_hash_lock);
+ }
+out:
+ return nr_nids;
+}
+#endif
+
+
+/*
+ * migrate task to new network
+ * gets nxi, puts old_nxi on change
+ */
+
+int nx_migrate_task(struct task_struct *p, struct nx_info *nxi)
+{
+ struct nx_info *old_nxi;
+ int ret = 0;
+
+ if (!p || !nxi)
+ BUG();
+
+ vxdprintk(VXD_CBIT(nid, 5),
+ "nx_migrate_task(%p,%p[#%d.%d.%d])",
+ p, nxi, nxi->nx_id,
+ atomic_read(&nxi->nx_usecnt),
+ atomic_read(&nxi->nx_tasks));
+
+ if (nx_info_flags(nxi, NXF_INFO_PRIVATE, 0) &&
+ !nx_info_flags(nxi, NXF_STATE_SETUP, 0))
+ return -EACCES;
+
+ if (nx_info_state(nxi, NXS_SHUTDOWN))
+ return -EFAULT;
+
+ /* maybe disallow this completely? */
+ old_nxi = task_get_nx_info(p);
+ if (old_nxi == nxi)
+ goto out;
+
+ task_lock(p);
+ if (old_nxi)
+ clr_nx_info(&p->nx_info);
+ claim_nx_info(nxi, p);
+ set_nx_info(&p->nx_info, nxi);
+ p->nid = nxi->nx_id;
+ task_unlock(p);
+
+ vxdprintk(VXD_CBIT(nid, 5),
+ "moved task %p into nxi:%p[#%d]",
+ p, nxi, nxi->nx_id);
+
+ if (old_nxi)
+ release_nx_info(old_nxi, p);
+ ret = 0;
+out:
+ put_nx_info(old_nxi);
+ return ret;
+}
+
+
+void nx_set_persistent(struct nx_info *nxi)
+{
+ vxdprintk(VXD_CBIT(nid, 6),
+ "nx_set_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+ get_nx_info(nxi);
+ claim_nx_info(nxi, NULL);
+}
+
+void nx_clear_persistent(struct nx_info *nxi)
+{
+ vxdprintk(VXD_CBIT(nid, 6),
+ "nx_clear_persistent(%p[#%d])", nxi, nxi->nx_id);
+
+ release_nx_info(nxi, NULL);
+ put_nx_info(nxi);
+}
+
+void nx_update_persistent(struct nx_info *nxi)
+{
+ if (nx_info_flags(nxi, NXF_PERSISTENT, 0))
+ nx_set_persistent(nxi);
+ else
+ nx_clear_persistent(nxi);
+}
+
+/* vserver syscall commands below here */
+
+/* task nid and nx_info functions */
+
+#include <asm/uaccess.h>
+
+
+int vc_task_nid(uint32_t id)
+{
+ nid_t nid;
+
+ if (id) {
+ struct task_struct *tsk;
+
+ read_lock(&tasklist_lock);
+ tsk = find_task_by_real_pid(id);
+ nid = (tsk) ? tsk->nid : -ESRCH;
+ read_unlock(&tasklist_lock);
+ } else
+ nid = nx_current_nid();
+ return nid;
+}
+
+
+int vc_nx_info(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_nx_info_v0 vc_data;
+
+ vc_data.nid = nxi->nx_id;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+/* network functions */
+
+int vc_net_create(uint32_t nid, void __user *data)
+{
+ struct vcmd_net_create vc_data = { .flagword = NXF_INIT_SET };
+ struct nx_info *new_nxi;
+ int ret;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ if ((nid > MAX_S_CONTEXT) || (nid < 2))
+ return -EINVAL;
+
+ new_nxi = __create_nx_info(nid);
+ if (IS_ERR(new_nxi))
+ return PTR_ERR(new_nxi);
+
+ /* initial flags */
+ new_nxi->nx_flags = vc_data.flagword;
+
+ ret = -ENOEXEC;
+ if (vs_net_change(new_nxi, VSC_NETUP))
+ goto out;
+
+ ret = nx_migrate_task(current, new_nxi);
+ if (ret)
+ goto out;
+
+ /* return context id on success */
+ ret = new_nxi->nx_id;
+
+ /* get a reference for persistent contexts */
+ if ((vc_data.flagword & NXF_PERSISTENT))
+ nx_set_persistent(new_nxi);
+out:
+ release_nx_info(new_nxi, NULL);
+ put_nx_info(new_nxi);
+ return ret;
+}
+
+
+int vc_net_migrate(struct nx_info *nxi, void __user *data)
+{
+ return nx_migrate_task(current, nxi);
+}
+
+
+
+int do_add_v4_addr(struct nx_info *nxi, __be32 ip, __be32 ip2, __be32 mask,
+ uint16_t type, uint16_t flags)
+{
+ struct nx_addr_v4 *nxa = &nxi->v4;
+
+ if (NX_IPV4(nxi)) {
+ /* locate last entry */
+ for (; nxa->next; nxa = nxa->next);
+ nxa->next = __alloc_nx_addr_v4();
+ nxa = nxa->next;
+
+ if (IS_ERR(nxa))
+ return PTR_ERR(nxa);
+ }
+
+ if (nxi->v4.next)
+		/* remove single ip from ip list */
+ nxi->nx_flags &= ~NXF_SINGLE_IP;
+
+ nxa->ip[0].s_addr = ip;
+ nxa->ip[1].s_addr = ip2;
+ nxa->mask.s_addr = mask;
+ nxa->type = type;
+ nxa->flags = flags;
+ return 0;
+}
+
+
+int vc_net_add(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_v0 vc_data;
+ int index, ret = 0;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+ case NXA_TYPE_IPV4:
+ if ((vc_data.count < 1) || (vc_data.count > 4))
+ return -EINVAL;
+
+ index = 0;
+ while (index < vc_data.count) {
+ ret = do_add_v4_addr(nxi, vc_data.ip[index].s_addr, 0,
+ vc_data.mask[index].s_addr, NXA_TYPE_ADDR, 0);
+ if (ret)
+ return ret;
+ index++;
+ }
+ ret = index;
+ break;
+
+ case NXA_TYPE_IPV4|NXA_MOD_BCAST:
+ nxi->v4_bcast = vc_data.ip[0];
+ ret = 1;
+ break;
+
+ case NXA_TYPE_IPV4|NXA_MOD_LBACK:
+ nxi->v4_lback = vc_data.ip[0];
+ ret = 1;
+ break;
+
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+
+int vc_net_remove(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_v0 vc_data;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+ case NXA_TYPE_ANY:
+ __dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
+ memset(&nxi->v4, 0, sizeof(nxi->v4));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+int vc_net_add_ipv4(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_ipv4_v1 vc_data;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+ case NXA_TYPE_ADDR:
+ case NXA_TYPE_RANGE:
+ case NXA_TYPE_MASK:
+ return do_add_v4_addr(nxi, vc_data.ip.s_addr, 0,
+ vc_data.mask.s_addr, vc_data.type, vc_data.flags);
+
+ case NXA_TYPE_ADDR | NXA_MOD_BCAST:
+ nxi->v4_bcast = vc_data.ip;
+ break;
+
+ case NXA_TYPE_ADDR | NXA_MOD_LBACK:
+ nxi->v4_lback = vc_data.ip;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vc_net_remove_ipv4(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_ipv4_v1 vc_data;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+/* case NXA_TYPE_ADDR:
+ break; */
+
+ case NXA_TYPE_ANY:
+ __dealloc_nx_addr_v4_all(xchg(&nxi->v4.next, NULL));
+ memset(&nxi->v4, 0, sizeof(nxi->v4));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+
+#ifdef CONFIG_IPV6
+
+int do_add_v6_addr(struct nx_info *nxi,
+ struct in6_addr *ip, struct in6_addr *mask,
+ uint32_t prefix, uint16_t type, uint16_t flags)
+{
+ struct nx_addr_v6 *nxa = &nxi->v6;
+
+ if (NX_IPV6(nxi)) {
+ /* locate last entry */
+ for (; nxa->next; nxa = nxa->next);
+ nxa->next = __alloc_nx_addr_v6();
+ nxa = nxa->next;
+
+ if (IS_ERR(nxa))
+ return PTR_ERR(nxa);
+ }
+
+ nxa->ip = *ip;
+ nxa->mask = *mask;
+ nxa->prefix = prefix;
+ nxa->type = type;
+ nxa->flags = flags;
+ return 0;
+}
+
+
+int vc_net_add_ipv6(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_ipv6_v1 vc_data;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+ case NXA_TYPE_ADDR:
+ case NXA_TYPE_MASK:
+ return do_add_v6_addr(nxi, &vc_data.ip, &vc_data.mask,
+ vc_data.prefix, vc_data.type, vc_data.flags);
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int vc_net_remove_ipv6(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_addr_ipv6_v1 vc_data;
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ switch (vc_data.type) {
+ case NXA_TYPE_ANY:
+ __dealloc_nx_addr_v6_all(xchg(&nxi->v6.next, NULL));
+ memset(&nxi->v6, 0, sizeof(nxi->v6));
+ break;
+
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+#endif /* CONFIG_IPV6 */
+
+
+int vc_get_nflags(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_flags_v0 vc_data;
+
+ vc_data.flagword = nxi->nx_flags;
+
+ /* special STATE flag handling */
+ vc_data.mask = vs_mask_flags(~0ULL, nxi->nx_flags, NXF_ONE_TIME);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+int vc_set_nflags(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_flags_v0 vc_data;
+ uint64_t mask, trigger;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ /* special STATE flag handling */
+ mask = vs_mask_mask(vc_data.mask, nxi->nx_flags, NXF_ONE_TIME);
+ trigger = (mask & nxi->nx_flags) ^ (mask & vc_data.flagword);
+
+ nxi->nx_flags = vs_mask_flags(nxi->nx_flags,
+ vc_data.flagword, mask);
+ if (trigger & NXF_PERSISTENT)
+ nx_update_persistent(nxi);
+
+ return 0;
+}
+
+int vc_get_ncaps(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_caps_v0 vc_data;
+
+ vc_data.ncaps = nxi->nx_ncaps;
+ vc_data.cmask = ~0ULL;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+int vc_set_ncaps(struct nx_info *nxi, void __user *data)
+{
+ struct vcmd_net_caps_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ nxi->nx_ncaps = vs_mask_flags(nxi->nx_ncaps,
+ vc_data.ncaps, vc_data.cmask);
+ return 0;
+}
+
+
+#include <linux/module.h>
+
+module_init(init_network);
+
+EXPORT_SYMBOL_GPL(free_nx_info);
+EXPORT_SYMBOL_GPL(unhash_nx_info);
+
--- a/kernel/vserver/proc.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/proc.c 2008-04-21 13:42:34.000000000 -0400
@@ -0,0 +1,1086 @@
+/*
+ * linux/kernel/vserver/proc.c
+ *
+ * Virtual Context Support
+ *
+ *  Copyright (C) 2003-2007  Herbert Pötzl
+ *
+ * V0.01 basic structure
+ * V0.02 adaptation vs1.3.0
+ * V0.03 proc permissions
+ * V0.04 locking/generic
+ * V0.05 next generation procfs
+ * V0.06 inode validation
+ * V0.07 generic rewrite vid
+ * V0.08 remove inode type
+ *
+ */
+
+#include <linux/proc_fs.h>
+#include <asm/unistd.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_cvirt.h>
+
+#include <linux/in.h>
+#include <linux/inetdevice.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
+
+#include <linux/vserver/global.h>
+
+#include "cvirt_proc.h"
+#include "cacct_proc.h"
+#include "limit_proc.h"
+#include "sched_proc.h"
+#include "vci_config.h"
+
+
+static inline char *print_cap_t(char *buffer, kernel_cap_t *c)
+{
+ unsigned __capi;
+
+ CAP_FOR_EACH_U32(__capi) {
+ buffer += sprintf(buffer, "%08x",
+ c->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
+ }
+ return buffer;
+}
+
+
+static struct proc_dir_entry *proc_virtual;
+
+static struct proc_dir_entry *proc_virtnet;
+
+
+/* first the actual feeds */
+
+
+static int proc_vci(char *buffer)
+{
+ return sprintf(buffer,
+ "VCIVersion:\t%04x:%04x\n"
+ "VCISyscall:\t%d\n"
+ "VCIKernel:\t%08x\n",
+ VCI_VERSION >> 16,
+ VCI_VERSION & 0xFFFF,
+ __NR_vserver,
+ vci_kernel_config());
+}
+
+static int proc_virtual_info(char *buffer)
+{
+ return proc_vci(buffer);
+}
+
+static int proc_virtual_status(char *buffer)
+{
+ return sprintf(buffer,
+ "#CTotal:\t%d\n"
+ "#CActive:\t%d\n"
+ "#NSProxy:\t%d\t%d %d %d %d %d %d\n",
+ atomic_read(&vx_global_ctotal),
+ atomic_read(&vx_global_cactive),
+ atomic_read(&vs_global_nsproxy),
+ atomic_read(&vs_global_fs),
+ atomic_read(&vs_global_mnt_ns),
+ atomic_read(&vs_global_uts_ns),
+ atomic_read(&vs_global_ipc_ns),
+ atomic_read(&vs_global_user_ns),
+ atomic_read(&vs_global_pid_ns));
+}
+
+
+int proc_vxi_info(struct vx_info *vxi, char *buffer)
+{
+ int length;
+
+ length = sprintf(buffer,
+ "ID:\t%d\n"
+ "Info:\t%p\n"
+ "Init:\t%d\n"
+ "OOM:\t%lld\n",
+ vxi->vx_id,
+ vxi,
+ vxi->vx_initpid,
+ vxi->vx_badness_bias);
+ return length;
+}
+
+int proc_vxi_status(struct vx_info *vxi, char *buffer)
+{
+ char *orig = buffer;
+
+ buffer += sprintf(buffer,
+ "UseCnt:\t%d\n"
+ "Tasks:\t%d\n"
+ "Flags:\t%016llx\n",
+ atomic_read(&vxi->vx_usecnt),
+ atomic_read(&vxi->vx_tasks),
+ (unsigned long long)vxi->vx_flags);
+
+ buffer += sprintf(buffer, "BCaps:\t");
+ buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+ buffer += sprintf(buffer, "\n");
+
+ buffer += sprintf(buffer,
+ "CCaps:\t%016llx\n"
+ "Spaces:\t%08lx\n",
+ (unsigned long long)vxi->vx_ccaps,
+ vxi->vx_nsmask);
+ return buffer - orig;
+}
+
+int proc_vxi_limit(struct vx_info *vxi, char *buffer)
+{
+ return vx_info_proc_limit(&vxi->limit, buffer);
+}
+
+int proc_vxi_sched(struct vx_info *vxi, char *buffer)
+{
+ int cpu, length;
+
+ length = vx_info_proc_sched(&vxi->sched, buffer);
+ for_each_online_cpu(cpu) {
+ length += vx_info_proc_sched_pc(
+ &vx_per_cpu(vxi, sched_pc, cpu),
+ buffer + length, cpu);
+ }
+ return length;
+}
+
+int proc_vxi_nsproxy(struct vx_info *vxi, char *buffer)
+{
+ return vx_info_proc_nsproxy(vxi->vx_nsproxy, buffer);
+}
+
+int proc_vxi_cvirt(struct vx_info *vxi, char *buffer)
+{
+ int cpu, length;
+
+ vx_update_load(vxi);
+ length = vx_info_proc_cvirt(&vxi->cvirt, buffer);
+ for_each_online_cpu(cpu) {
+ length += vx_info_proc_cvirt_pc(
+ &vx_per_cpu(vxi, cvirt_pc, cpu),
+ buffer + length, cpu);
+ }
+ return length;
+}
+
+int proc_vxi_cacct(struct vx_info *vxi, char *buffer)
+{
+ return vx_info_proc_cacct(&vxi->cacct, buffer);
+}
+
+
+static int proc_virtnet_info(char *buffer)
+{
+ return proc_vci(buffer);
+}
+
+static int proc_virtnet_status(char *buffer)
+{
+ return sprintf(buffer,
+ "#CTotal:\t%d\n"
+ "#CActive:\t%d\n",
+ atomic_read(&nx_global_ctotal),
+ atomic_read(&nx_global_cactive));
+}
+
+int proc_nxi_info(struct nx_info *nxi, char *buffer)
+{
+ struct nx_addr_v4 *v4a;
+#ifdef CONFIG_IPV6
+ struct nx_addr_v6 *v6a;
+#endif
+ int length, i;
+
+ length = sprintf(buffer,
+ "ID:\t%d\n"
+ "Info:\t%p\n"
+ "Bcast:\t" NIPQUAD_FMT "\n"
+ "Lback:\t" NIPQUAD_FMT "\n",
+ nxi->nx_id,
+ nxi,
+ NIPQUAD(nxi->v4_bcast.s_addr),
+ NIPQUAD(nxi->v4_lback.s_addr));
+
+ if (!NX_IPV4(nxi))
+ goto skip_v4;
+ for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+ length += sprintf(buffer + length, "%d:\t" NXAV4_FMT "\n",
+ i, NXAV4(v4a));
+skip_v4:
+#ifdef CONFIG_IPV6
+ if (!NX_IPV6(nxi))
+ goto skip_v6;
+ for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+ length += sprintf(buffer + length, "%d:\t" NXAV6_FMT "\n",
+ i, NXAV6(v6a));
+skip_v6:
+#endif
+ return length;
+}
+
+int proc_nxi_status(struct nx_info *nxi, char *buffer)
+{
+ int length;
+
+ length = sprintf(buffer,
+ "UseCnt:\t%d\n"
+ "Tasks:\t%d\n"
+ "Flags:\t%016llx\n"
+ "NCaps:\t%016llx\n",
+ atomic_read(&nxi->nx_usecnt),
+ atomic_read(&nxi->nx_tasks),
+ (unsigned long long)nxi->nx_flags,
+ (unsigned long long)nxi->nx_ncaps);
+ return length;
+}
+
+
+
+/* here the inode helpers */
+
+struct vs_entry {
+ int len;
+ char *name;
+ mode_t mode;
+ struct inode_operations *iop;
+ struct file_operations *fop;
+ union proc_op op;
+};
+
+static struct inode *vs_proc_make_inode(struct super_block *sb, struct vs_entry *p)
+{
+ struct inode *inode = new_inode(sb);
+
+ if (!inode)
+ goto out;
+
+ inode->i_mode = p->mode;
+ if (p->iop)
+ inode->i_op = p->iop;
+ if (p->fop)
+ inode->i_fop = p->fop;
+
+ inode->i_nlink = (p->mode & S_IFDIR) ? 2 : 1;
+ inode->i_flags |= S_IMMUTABLE;
+
+ inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
+
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+ inode->i_tag = 0;
+out:
+ return inode;
+}
+
+static struct dentry *vs_proc_instantiate(struct inode *dir,
+ struct dentry *dentry, int id, void *ptr)
+{
+ struct vs_entry *p = ptr;
+ struct inode *inode = vs_proc_make_inode(dir->i_sb, p);
+ struct dentry *error = ERR_PTR(-EINVAL);
+
+ if (!inode)
+ goto out;
+
+ PROC_I(inode)->op = p->op;
+ PROC_I(inode)->fd = id;
+ d_add(dentry, inode);
+ error = NULL;
+out:
+ return error;
+}
+
+/* Lookups */
+
+typedef struct dentry *instantiate_t(struct inode *, struct dentry *, int, void *);
+
+/*
+ * Fill a directory entry.
+ *
+ * If possible create the dcache entry and derive our inode number and
+ * file type from dcache entry.
+ *
+ * Since all of the proc inode numbers are dynamically generated, the inode
+ * numbers do not exist until the inode is cached. This means creating
+ * the dcache entry in readdir is necessary to keep the inode numbers
+ * reported by readdir in sync with the inode numbers reported
+ * by stat.
+ */
+static int proc_fill_cache(struct file *filp, void *dirent, filldir_t filldir,
+ char *name, int len, instantiate_t instantiate, int id, void *ptr)
+{
+ struct dentry *child, *dir = filp->f_dentry;
+ struct inode *inode;
+ struct qstr qname;
+ ino_t ino = 0;
+ unsigned type = DT_UNKNOWN;
+
+ qname.name = name;
+ qname.len = len;
+ qname.hash = full_name_hash(name, len);
+
+ child = d_lookup(dir, &qname);
+ if (!child) {
+ struct dentry *new;
+ new = d_alloc(dir, &qname);
+ if (new) {
+ child = instantiate(dir->d_inode, new, id, ptr);
+ if (child)
+ dput(new);
+ else
+ child = new;
+ }
+ }
+ if (!child || IS_ERR(child) || !child->d_inode)
+ goto end_instantiate;
+ inode = child->d_inode;
+ if (inode) {
+ ino = inode->i_ino;
+ type = inode->i_mode >> 12;
+ }
+ dput(child);
+end_instantiate:
+ if (!ino)
+ ino = find_inode_number(dir, &qname);
+ if (!ino)
+ ino = 1;
+ return filldir(dirent, name, len, filp->f_pos, ino, type);
+}
+
+
+
+/* get and revalidate vx_info/xid */
+
+static inline
+struct vx_info *get_proc_vx_info(struct inode *inode)
+{
+ return lookup_vx_info(PROC_I(inode)->fd);
+}
+
+static int proc_xid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ xid_t xid = PROC_I(inode)->fd;
+
+ if (!xid || xid_is_hashed(xid))
+ return 1;
+ d_drop(dentry);
+ return 0;
+}
+
+
+/* get and revalidate nx_info/nid */
+
+static int proc_nid_revalidate(struct dentry *dentry, struct nameidata *nd)
+{
+ struct inode *inode = dentry->d_inode;
+ nid_t nid = PROC_I(inode)->fd;
+
+ if (!nid || nid_is_hashed(nid))
+ return 1;
+ d_drop(dentry);
+ return 0;
+}
+
+
+
+#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
+
+static ssize_t proc_vs_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ unsigned long page;
+ ssize_t length = 0;
+
+ if (count > PROC_BLOCK_SIZE)
+ count = PROC_BLOCK_SIZE;
+
+ /* fade that out as soon as stable */
+ WARN_ON(PROC_I(inode)->fd);
+
+ if (!(page = __get_free_page(GFP_KERNEL)))
+ return -ENOMEM;
+
+ BUG_ON(!PROC_I(inode)->op.proc_vs_read);
+ length = PROC_I(inode)->op.proc_vs_read((char *)page);
+
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos,
+ (char *)page, length);
+
+ free_page(page);
+ return length;
+}
+
+static ssize_t proc_vx_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct vx_info *vxi = NULL;
+ xid_t xid = PROC_I(inode)->fd;
+ unsigned long page;
+ ssize_t length = 0;
+
+ if (count > PROC_BLOCK_SIZE)
+ count = PROC_BLOCK_SIZE;
+
+ /* fade that out as soon as stable */
+ WARN_ON(!xid);
+ vxi = lookup_vx_info(xid);
+ if (!vxi)
+ goto out;
+
+ length = -ENOMEM;
+ if (!(page = __get_free_page(GFP_KERNEL)))
+ goto out_put;
+
+ BUG_ON(!PROC_I(inode)->op.proc_vxi_read);
+ length = PROC_I(inode)->op.proc_vxi_read(vxi, (char *)page);
+
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos,
+ (char *)page, length);
+
+ free_page(page);
+out_put:
+ put_vx_info(vxi);
+out:
+ return length;
+}
+
+static ssize_t proc_nx_info_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct inode *inode = file->f_dentry->d_inode;
+ struct nx_info *nxi = NULL;
+ nid_t nid = PROC_I(inode)->fd;
+ unsigned long page;
+ ssize_t length = 0;
+
+ if (count > PROC_BLOCK_SIZE)
+ count = PROC_BLOCK_SIZE;
+
+ /* fade that out as soon as stable */
+ WARN_ON(!nid);
+ nxi = lookup_nx_info(nid);
+ if (!nxi)
+ goto out;
+
+ length = -ENOMEM;
+ if (!(page = __get_free_page(GFP_KERNEL)))
+ goto out_put;
+
+ BUG_ON(!PROC_I(inode)->op.proc_nxi_read);
+ length = PROC_I(inode)->op.proc_nxi_read(nxi, (char *)page);
+
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos,
+ (char *)page, length);
+
+ free_page(page);
+out_put:
+ put_nx_info(nxi);
+out:
+ return length;
+}
+
+
+
+/* here comes the lower level */
+
+
+#define NOD(NAME, MODE, IOP, FOP, OP) { \
+ .len = sizeof(NAME) - 1, \
+ .name = (NAME), \
+ .mode = MODE, \
+ .iop = IOP, \
+ .fop = FOP, \
+ .op = OP, \
+}
+
+
+#define DIR(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFDIR | (MODE)), \
+ &proc_ ## OTYPE ## _inode_operations, \
+ &proc_ ## OTYPE ## _file_operations, { } )
+
+#define INF(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG | (MODE)), NULL, \
+ &proc_vs_info_file_operations, \
+ { .proc_vs_read = &proc_##OTYPE } )
+
+#define VINF(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG | (MODE)), NULL, \
+ &proc_vx_info_file_operations, \
+ { .proc_vxi_read = &proc_##OTYPE } )
+
+#define NINF(NAME, MODE, OTYPE) \
+ NOD(NAME, (S_IFREG | (MODE)), NULL, \
+ &proc_nx_info_file_operations, \
+ { .proc_nxi_read = &proc_##OTYPE } )
+
+
+static struct file_operations proc_vs_info_file_operations = {
+ .read = proc_vs_info_read,
+};
+
+static struct file_operations proc_vx_info_file_operations = {
+ .read = proc_vx_info_read,
+};
+
+static struct dentry_operations proc_xid_dentry_operations = {
+ .d_revalidate = proc_xid_revalidate,
+};
+
+static struct vs_entry vx_base_stuff[] = {
+ VINF("info", S_IRUGO, vxi_info),
+ VINF("status", S_IRUGO, vxi_status),
+ VINF("limit", S_IRUGO, vxi_limit),
+ VINF("sched", S_IRUGO, vxi_sched),
+ VINF("nsproxy", S_IRUGO, vxi_nsproxy),
+ VINF("cvirt", S_IRUGO, vxi_cvirt),
+ VINF("cacct", S_IRUGO, vxi_cacct),
+ {}
+};
+
+
+
+
+static struct dentry *proc_xid_instantiate(struct inode *dir,
+ struct dentry *dentry, int id, void *ptr)
+{
+ dentry->d_op = &proc_xid_dentry_operations;
+ return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_xid_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ struct vs_entry *p = vx_base_stuff;
+ struct dentry *error = ERR_PTR(-ENOENT);
+
+ for (; p->name; p++) {
+ if (p->len != dentry->d_name.len)
+ continue;
+ if (!memcmp(dentry->d_name.name, p->name, p->len))
+ break;
+ }
+ if (!p->name)
+ goto out;
+
+ error = proc_xid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+ return error;
+}
+
+static int proc_xid_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct vs_entry *p = vx_base_stuff;
+ int size = sizeof(vx_base_stuff) / sizeof(struct vs_entry);
+ int pos, index;
+ u64 ino;
+
+ pos = filp->f_pos;
+ switch (pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ default:
+ index = pos - 2;
+ if (index >= size)
+ goto out;
+ for (p += index; p->name; p++) {
+ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+ vs_proc_instantiate, PROC_I(inode)->fd, p))
+ goto out;
+ pos++;
+ }
+ }
+out:
+ filp->f_pos = pos;
+ return 1;
+}
+
+
+
+static struct file_operations proc_nx_info_file_operations = {
+ .read = proc_nx_info_read,
+};
+
+static struct dentry_operations proc_nid_dentry_operations = {
+ .d_revalidate = proc_nid_revalidate,
+};
+
+static struct vs_entry nx_base_stuff[] = {
+ NINF("info", S_IRUGO, nxi_info),
+ NINF("status", S_IRUGO, nxi_status),
+ {}
+};
+
+
+static struct dentry *proc_nid_instantiate(struct inode *dir,
+ struct dentry *dentry, int id, void *ptr)
+{
+ dentry->d_op = &proc_nid_dentry_operations;
+ return vs_proc_instantiate(dir, dentry, id, ptr);
+}
+
+static struct dentry *proc_nid_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ struct vs_entry *p = nx_base_stuff;
+ struct dentry *error = ERR_PTR(-ENOENT);
+
+ for (; p->name; p++) {
+ if (p->len != dentry->d_name.len)
+ continue;
+ if (!memcmp(dentry->d_name.name, p->name, p->len))
+ break;
+ }
+ if (!p->name)
+ goto out;
+
+ error = proc_nid_instantiate(dir, dentry, PROC_I(dir)->fd, p);
+out:
+ return error;
+}
+
+static int proc_nid_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct vs_entry *p = nx_base_stuff;
+ int size = sizeof(nx_base_stuff) / sizeof(struct vs_entry);
+ int pos, index;
+ u64 ino;
+
+ pos = filp->f_pos;
+ switch (pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ default:
+ index = pos - 2;
+ if (index >= size)
+ goto out;
+ for (p += index; p->name; p++) {
+ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+ vs_proc_instantiate, PROC_I(inode)->fd, p))
+ goto out;
+ pos++;
+ }
+ }
+out:
+ filp->f_pos = pos;
+ return 1;
+}
+
+
+#define MAX_MULBY10 ((~0U - 9) / 10)
+
+static inline int atovid(const char *str, int len)
+{
+ int vid, c;
+
+ vid = 0;
+ while (len-- > 0) {
+ c = *str - '0';
+ str++;
+ if (c > 9)
+ return -1;
+ if (vid >= MAX_MULBY10)
+ return -1;
+ vid *= 10;
+ vid += c;
+ if (!vid)
+ return -1;
+ }
+ return vid;
+}
+
+/* now the upper level (virtual) */
+
+
+static struct file_operations proc_xid_file_operations = {
+ .read = generic_read_dir,
+ .readdir = proc_xid_readdir,
+};
+
+static struct inode_operations proc_xid_inode_operations = {
+ .lookup = proc_xid_lookup,
+};
+
+static struct vs_entry vx_virtual_stuff[] = {
+ INF("info", S_IRUGO, virtual_info),
+ INF("status", S_IRUGO, virtual_status),
+ DIR(NULL, S_IRUGO | S_IXUGO, xid),
+};
+
+
+static struct dentry *proc_virtual_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ struct vs_entry *p = vx_virtual_stuff;
+ struct dentry *error = ERR_PTR(-ENOENT);
+ int id = 0;
+
+ for (; p->name; p++) {
+ if (p->len != dentry->d_name.len)
+ continue;
+ if (!memcmp(dentry->d_name.name, p->name, p->len))
+ break;
+ }
+ if (p->name)
+ goto instantiate;
+
+ id = atovid(dentry->d_name.name, dentry->d_name.len);
+ if ((id < 0) || !xid_is_hashed(id))
+ goto out;
+
+instantiate:
+ error = proc_xid_instantiate(dir, dentry, id, p);
+out:
+ return error;
+}
+
+static struct file_operations proc_nid_file_operations = {
+ .read = generic_read_dir,
+ .readdir = proc_nid_readdir,
+};
+
+static struct inode_operations proc_nid_inode_operations = {
+ .lookup = proc_nid_lookup,
+};
+
+static struct vs_entry nx_virtnet_stuff[] = {
+ INF("info", S_IRUGO, virtnet_info),
+ INF("status", S_IRUGO, virtnet_status),
+ DIR(NULL, S_IRUGO | S_IXUGO, nid),
+};
+
+
+static struct dentry *proc_virtnet_lookup(struct inode *dir,
+ struct dentry *dentry, struct nameidata *nd)
+{
+ struct vs_entry *p = nx_virtnet_stuff;
+ struct dentry *error = ERR_PTR(-ENOENT);
+ int id = 0;
+
+ for (; p->name; p++) {
+ if (p->len != dentry->d_name.len)
+ continue;
+ if (!memcmp(dentry->d_name.name, p->name, p->len))
+ break;
+ }
+ if (p->name)
+ goto instantiate;
+
+ id = atovid(dentry->d_name.name, dentry->d_name.len);
+ if ((id < 0) || !nid_is_hashed(id))
+ goto out;
+
+instantiate:
+ error = proc_nid_instantiate(dir, dentry, id, p);
+out:
+ return error;
+}
+
+
+#define PROC_MAXVIDS 32
+
+int proc_virtual_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct vs_entry *p = vx_virtual_stuff;
+ int size = sizeof(vx_virtual_stuff) / sizeof(struct vs_entry);
+ int pos, index;
+ unsigned int xid_array[PROC_MAXVIDS];
+ char buf[PROC_NUMBUF];
+ unsigned int nr_xids, i;
+ u64 ino;
+
+ pos = filp->f_pos;
+ switch (pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ default:
+ index = pos - 2;
+ if (index >= size)
+ goto entries;
+ for (p += index; p->name; p++) {
+ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+ vs_proc_instantiate, 0, p))
+ goto out;
+ pos++;
+ }
+ entries:
+ index = pos - size;
+ p = &vx_virtual_stuff[size - 1];
+ nr_xids = get_xid_list(index, xid_array, PROC_MAXVIDS);
+ for (i = 0; i < nr_xids; i++) {
+ int n, xid = xid_array[i];
+ unsigned int j = PROC_NUMBUF;
+
+ n = xid;
+ do
+ buf[--j] = '0' + (n % 10);
+ while (n /= 10);
+
+ if (proc_fill_cache(filp, dirent, filldir,
+ buf + j, PROC_NUMBUF - j,
+ vs_proc_instantiate, xid, p))
+ goto out;
+ pos++;
+ }
+ }
+out:
+ filp->f_pos = pos;
+ return 0;
+}
+
+static int proc_virtual_getattr(struct vfsmount *mnt,
+ struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+
+ generic_fillattr(inode, stat);
+ stat->nlink = 2 + atomic_read(&vx_global_cactive);
+ return 0;
+}
+
+static struct file_operations proc_virtual_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = proc_virtual_readdir,
+};
+
+static struct inode_operations proc_virtual_dir_inode_operations = {
+ .getattr = proc_virtual_getattr,
+ .lookup = proc_virtual_lookup,
+};
+
+
+
+
+
+int proc_virtnet_readdir(struct file *filp,
+ void *dirent, filldir_t filldir)
+{
+ struct dentry *dentry = filp->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ struct vs_entry *p = nx_virtnet_stuff;
+ int size = sizeof(nx_virtnet_stuff) / sizeof(struct vs_entry);
+ int pos, index;
+ unsigned int nid_array[PROC_MAXVIDS];
+ char buf[PROC_NUMBUF];
+ unsigned int nr_nids, i;
+ u64 ino;
+
+ pos = filp->f_pos;
+ switch (pos) {
+ case 0:
+ ino = inode->i_ino;
+ if (filldir(dirent, ".", 1, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ case 1:
+ ino = parent_ino(dentry);
+ if (filldir(dirent, "..", 2, pos, ino, DT_DIR) < 0)
+ goto out;
+ pos++;
+ /* fall through */
+ default:
+ index = pos - 2;
+ if (index >= size)
+ goto entries;
+ for (p += index; p->name; p++) {
+ if (proc_fill_cache(filp, dirent, filldir, p->name, p->len,
+ vs_proc_instantiate, 0, p))
+ goto out;
+ pos++;
+ }
+ entries:
+ index = pos - size;
+ p = &nx_virtnet_stuff[size - 1];
+ nr_nids = get_nid_list(index, nid_array, PROC_MAXVIDS);
+ for (i = 0; i < nr_nids; i++) {
+ int n, nid = nid_array[i];
+ unsigned int j = PROC_NUMBUF;
+
+ n = nid;
+ do
+ buf[--j] = '0' + (n % 10);
+ while (n /= 10);
+
+ if (proc_fill_cache(filp, dirent, filldir,
+ buf + j, PROC_NUMBUF - j,
+ vs_proc_instantiate, nid, p))
+ goto out;
+ pos++;
+ }
+ }
+out:
+ filp->f_pos = pos;
+ return 0;
+}
+
+static int proc_virtnet_getattr(struct vfsmount *mnt,
+ struct dentry *dentry, struct kstat *stat)
+{
+ struct inode *inode = dentry->d_inode;
+
+ generic_fillattr(inode, stat);
+ stat->nlink = 2 + atomic_read(&nx_global_cactive);
+ return 0;
+}
+
+static struct file_operations proc_virtnet_dir_operations = {
+ .read = generic_read_dir,
+ .readdir = proc_virtnet_readdir,
+};
+
+static struct inode_operations proc_virtnet_dir_inode_operations = {
+ .getattr = proc_virtnet_getattr,
+ .lookup = proc_virtnet_lookup,
+};
+
+
+
+void proc_vx_init(void)
+{
+ struct proc_dir_entry *ent;
+
+ ent = proc_mkdir("virtual", 0);
+ if (ent) {
+ ent->proc_fops = &proc_virtual_dir_operations;
+ ent->proc_iops = &proc_virtual_dir_inode_operations;
+ }
+ proc_virtual = ent;
+
+ ent = proc_mkdir("virtnet", 0);
+ if (ent) {
+ ent->proc_fops = &proc_virtnet_dir_operations;
+ ent->proc_iops = &proc_virtnet_dir_inode_operations;
+ }
+ proc_virtnet = ent;
+}
+
+
+
+
+/* per pid info */
+
+
+int proc_pid_vx_info(struct task_struct *p, char *buffer)
+{
+ struct vx_info *vxi;
+ char *orig = buffer;
+
+ buffer += sprintf(buffer, "XID:\t%d\n", vx_task_xid(p));
+
+ vxi = task_get_vx_info(p);
+ if (!vxi)
+ goto out;
+
+ buffer += sprintf(buffer, "BCaps:\t");
+ buffer = print_cap_t(buffer, &vxi->vx_bcaps);
+ buffer += sprintf(buffer, "\n");
+ buffer += sprintf(buffer, "CCaps:\t%016llx\n",
+ (unsigned long long)vxi->vx_ccaps);
+ buffer += sprintf(buffer, "CFlags:\t%016llx\n",
+ (unsigned long long)vxi->vx_flags);
+ buffer += sprintf(buffer, "CIPid:\t%d\n", vxi->vx_initpid);
+
+ put_vx_info(vxi);
+out:
+ return buffer - orig;
+}
+
+
+int proc_pid_nx_info(struct task_struct *p, char *buffer)
+{
+ struct nx_info *nxi;
+ struct nx_addr_v4 *v4a;
+#ifdef CONFIG_IPV6
+ struct nx_addr_v6 *v6a;
+#endif
+ char *orig = buffer;
+ int i;
+
+ buffer += sprintf(buffer, "NID:\t%d\n", nx_task_nid(p));
+
+ nxi = task_get_nx_info(p);
+ if (!nxi)
+ goto out;
+
+ buffer += sprintf(buffer, "NCaps:\t%016llx\n",
+ (unsigned long long)nxi->nx_ncaps);
+ buffer += sprintf(buffer, "NFlags:\t%016llx\n",
+ (unsigned long long)nxi->nx_flags);
+
+ buffer += sprintf(buffer,
+ "V4Root[bcast]:\t" NIPQUAD_FMT "\n",
+ NIPQUAD(nxi->v4_bcast.s_addr));
+ buffer += sprintf (buffer,
+ "V4Root[lback]:\t" NIPQUAD_FMT "\n",
+ NIPQUAD(nxi->v4_lback.s_addr));
+ if (!NX_IPV4(nxi))
+ goto skip_v4;
+ for (i = 0, v4a = &nxi->v4; v4a; i++, v4a = v4a->next)
+ buffer += sprintf(buffer, "V4Root[%d]:\t" NXAV4_FMT "\n",
+ i, NXAV4(v4a));
+skip_v4:
+#ifdef CONFIG_IPV6
+ if (!NX_IPV6(nxi))
+ goto skip_v6;
+ for (i = 0, v6a = &nxi->v6; v6a; i++, v6a = v6a->next)
+ buffer += sprintf(buffer, "V6Root[%d]:\t" NXAV6_FMT "\n",
+ i, NXAV6(v6a));
+skip_v6:
+#endif
+ put_nx_info(nxi);
+out:
+ return buffer - orig;
+}
+
--- a/kernel/vserver/sched.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/sched.c 2008-04-29 18:40:09.000000000 -0400
@@ -0,0 +1,413 @@
+/*
+ * linux/kernel/vserver/sched.c
+ *
+ * Virtual Server: Scheduler Support
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 adapted Sam Vilains version to 2.6.3
+ * V0.02 removed legacy interface
+ * V0.03 changed vcmds to vxi arg
+ * V0.04 removed older and legacy interfaces
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_sched.h>
+#include <linux/vserver/sched_cmd.h>
+
+#include <asm/uaccess.h>
+
+
+#define vxd_check_range(val, min, max) do { \
+ vxlprintk((val < min) || (val > max), \
+ "check_range(%ld,%ld,%ld)", \
+ (long)val, (long)min, (long)max, \
+ __FILE__, __LINE__); \
+ } while (0)
+
+
+void vx_update_sched_param(struct _vx_sched *sched,
+ struct _vx_sched_pc *sched_pc)
+{
+ unsigned int set_mask = sched->update_mask;
+
+ if (set_mask & VXSM_FILL_RATE)
+ sched_pc->fill_rate[0] = sched->fill_rate[0];
+ if (set_mask & VXSM_INTERVAL)
+ sched_pc->interval[0] = sched->interval[0];
+ if (set_mask & VXSM_FILL_RATE2)
+ sched_pc->fill_rate[1] = sched->fill_rate[1];
+ if (set_mask & VXSM_INTERVAL2)
+ sched_pc->interval[1] = sched->interval[1];
+ if (set_mask & VXSM_TOKENS)
+ sched_pc->tokens = sched->tokens;
+ if (set_mask & VXSM_TOKENS_MIN)
+ sched_pc->tokens_min = sched->tokens_min;
+ if (set_mask & VXSM_TOKENS_MAX)
+ sched_pc->tokens_max = sched->tokens_max;
+ if (set_mask & VXSM_PRIO_BIAS)
+ sched_pc->prio_bias = sched->prio_bias;
+
+ if (set_mask & VXSM_IDLE_TIME)
+ sched_pc->flags |= VXSF_IDLE_TIME;
+ else
+ sched_pc->flags &= ~VXSF_IDLE_TIME;
+
+ /* reset time */
+ sched_pc->norm_time = jiffies;
+}
+
+
+/*
+ * recalculate the context's scheduling tokens
+ *
+ * ret > 0 : number of tokens available
+ * ret < 0 : on hold, check delta_min[]
+ * -1 only jiffies
+ * -2 also idle time
+ *
+ */
+int vx_tokens_recalc(struct _vx_sched_pc *sched_pc,
+ unsigned long *norm_time, unsigned long *idle_time, int delta_min[2])
+{
+ long delta;
+ long tokens = 0;
+ int flags = sched_pc->flags;
+
+ /* how much time did pass? */
+ delta = *norm_time - sched_pc->norm_time;
+ vxd_check_range(delta, 0, INT_MAX);
+
+ if (delta >= sched_pc->interval[0]) {
+ long tokens, integral;
+
+ /* calc integral token part */
+ tokens = delta / sched_pc->interval[0];
+ integral = tokens * sched_pc->interval[0];
+ tokens *= sched_pc->fill_rate[0];
+#ifdef CONFIG_VSERVER_HARDCPU
+ delta_min[0] = delta - integral;
+ vxd_check_range(delta_min[0], 0, sched_pc->interval[0]);
+#endif
+ /* advance time */
+ sched_pc->norm_time += delta;
+
+ /* add tokens */
+ sched_pc->tokens += tokens;
+ sched_pc->token_time += tokens;
+ } else
+ delta_min[0] = delta;
+
+#ifdef CONFIG_VSERVER_IDLETIME
+ if (!(flags & VXSF_IDLE_TIME))
+ goto skip_idle;
+
+ /* how much was the idle skip? */
+ delta = *idle_time - sched_pc->idle_time;
+ vxd_check_range(delta, 0, INT_MAX);
+
+ if (delta >= sched_pc->interval[1]) {
+ long tokens, integral;
+
+ /* calc fair share token part */
+ tokens = delta / sched_pc->interval[1];
+ integral = tokens * sched_pc->interval[1];
+ tokens *= sched_pc->fill_rate[1];
+ delta_min[1] = delta - integral;
+ vxd_check_range(delta_min[1], 0, sched_pc->interval[1]);
+
+ /* advance idle time */
+ sched_pc->idle_time += integral;
+
+ /* add tokens */
+ sched_pc->tokens += tokens;
+ sched_pc->token_time += tokens;
+ } else
+ delta_min[1] = delta;
+skip_idle:
+#endif
+
+ /* clip at maximum */
+ if (sched_pc->tokens > sched_pc->tokens_max)
+ sched_pc->tokens = sched_pc->tokens_max;
+ tokens = sched_pc->tokens;
+
+ if ((flags & VXSF_ONHOLD)) {
+ /* can we unhold? */
+ if (tokens >= sched_pc->tokens_min) {
+ flags &= ~VXSF_ONHOLD;
+ sched_pc->hold_ticks +=
+ *norm_time - sched_pc->onhold;
+ } else
+ goto on_hold;
+ } else {
+ /* put on hold? */
+ if (tokens <= 0) {
+ flags |= VXSF_ONHOLD;
+ sched_pc->onhold = *norm_time;
+ goto on_hold;
+ }
+ }
+ sched_pc->flags = flags;
+ return tokens;
+
+on_hold:
+ tokens = sched_pc->tokens_min - tokens;
+ sched_pc->flags = flags;
+ BUG_ON(tokens < 0);
+
+#ifdef CONFIG_VSERVER_HARDCPU
+ /* next interval? */
+ if (!sched_pc->fill_rate[0])
+ delta_min[0] = HZ;
+ else if (tokens > sched_pc->fill_rate[0])
+ delta_min[0] += sched_pc->interval[0] *
+ tokens / sched_pc->fill_rate[0];
+ else
+ delta_min[0] = sched_pc->interval[0] - delta_min[0];
+ vxd_check_range(delta_min[0], 0, INT_MAX);
+
+#ifdef CONFIG_VSERVER_IDLETIME
+ if (!(flags & VXSF_IDLE_TIME))
+ return -1;
+
+ /* next interval? */
+ if (!sched_pc->fill_rate[1])
+ delta_min[1] = HZ;
+ else if (tokens > sched_pc->fill_rate[1])
+ delta_min[1] += sched_pc->interval[1] *
+ tokens / sched_pc->fill_rate[1];
+ else
+ delta_min[1] = sched_pc->interval[1] - delta_min[1];
+ vxd_check_range(delta_min[1], 0, INT_MAX);
+
+ return -2;
+#else
+ return -1;
+#endif /* CONFIG_VSERVER_IDLETIME */
+#else
+ return 0;
+#endif /* CONFIG_VSERVER_HARDCPU */
+}
+
+static inline unsigned long msec_to_ticks(unsigned long msec)
+{
+ return msecs_to_jiffies(msec);
+}
+
+static inline unsigned long ticks_to_msec(unsigned long ticks)
+{
+ return jiffies_to_msecs(ticks);
+}
+
+static inline unsigned long ticks_to_usec(unsigned long ticks)
+{
+ return jiffies_to_usecs(ticks);
+}
+
+
+static int do_set_sched(struct vx_info *vxi, struct vcmd_sched_v5 *data)
+{
+ unsigned int set_mask = data->mask;
+ unsigned int update_mask;
+ int i, cpu;
+
+ /* Sanity check data values */
+ if (data->tokens_max <= 0)
+ data->tokens_max = HZ;
+ if (data->tokens_min < 0)
+ data->tokens_min = HZ / 3;
+ if (data->tokens_min >= data->tokens_max)
+ data->tokens_min = data->tokens_max;
+
+ if (data->prio_bias > MAX_PRIO_BIAS)
+ data->prio_bias = MAX_PRIO_BIAS;
+ if (data->prio_bias < MIN_PRIO_BIAS)
+ data->prio_bias = MIN_PRIO_BIAS;
+
+ spin_lock(&vxi->sched.tokens_lock);
+
+ /* sync up on delayed updates */
+ for_each_cpu_mask(cpu, vxi->sched.update)
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, cpu));
+
+ if (set_mask & VXSM_FILL_RATE)
+ vxi->sched.fill_rate[0] = data->fill_rate[0];
+ if (set_mask & VXSM_FILL_RATE2)
+ vxi->sched.fill_rate[1] = data->fill_rate[1];
+ if (set_mask & VXSM_INTERVAL)
+ vxi->sched.interval[0] = (set_mask & VXSM_MSEC) ?
+ msec_to_ticks(data->interval[0]) : data->interval[0];
+ if (set_mask & VXSM_INTERVAL2)
+ vxi->sched.interval[1] = (set_mask & VXSM_MSEC) ?
+ msec_to_ticks(data->interval[1]) : data->interval[1];
+ if (set_mask & VXSM_TOKENS)
+ vxi->sched.tokens = data->tokens;
+ if (set_mask & VXSM_TOKENS_MIN)
+ vxi->sched.tokens_min = data->tokens_min;
+ if (set_mask & VXSM_TOKENS_MAX)
+ vxi->sched.tokens_max = data->tokens_max;
+ if (set_mask & VXSM_PRIO_BIAS)
+ vxi->sched.prio_bias = data->prio_bias;
+
+ /* Sanity check rate/interval */
+ for (i = 0; i < 2; i++) {
+ if (data->fill_rate[i] < 0)
+ data->fill_rate[i] = 0;
+ if (data->interval[i] <= 0)
+ data->interval[i] = HZ;
+ }
+
+ update_mask = vxi->sched.update_mask & VXSM_SET_MASK;
+ update_mask |= (set_mask & (VXSM_SET_MASK | VXSM_IDLE_TIME));
+ vxi->sched.update_mask = update_mask;
+
+#ifdef CONFIG_SMP
+ rmb();
+ if (set_mask & VXSM_CPU_ID) {
+ vxi->sched.update = cpumask_of_cpu(data->cpu_id);
+ cpus_and(vxi->sched.update, cpu_online_map,
+ vxi->sched.update);
+ } else
+ vxi->sched.update = cpu_online_map;
+
+ /* forced reload? */
+ if (set_mask & VXSM_FORCE) {
+ for_each_cpu_mask(cpu, vxi->sched.update)
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, cpu));
+ vxi->sched.update = CPU_MASK_NONE;
+ }
+#else
+ /* on UP we update immediately */
+ vx_update_sched_param(&vxi->sched,
+ &vx_per_cpu(vxi, sched_pc, 0));
+#endif
+
+ spin_unlock(&vxi->sched.tokens_lock);
+ return 0;
+}
+
+
+#define COPY_IDS(C) C(cpu_id); C(bucket_id)
+#define COPY_PRI(C) C(prio_bias)
+#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
+#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \
+ C(fill_rate[1]); C(interval[1]);
+
+#define COPY_VALUE(name) vc_data.name = data->name
+
+static int do_set_sched_v4(struct vx_info *vxi, struct vcmd_set_sched_v4 *data)
+{
+ struct vcmd_sched_v5 vc_data;
+
+ vc_data.mask = data->set_mask;
+ COPY_IDS(COPY_VALUE);
+ COPY_PRI(COPY_VALUE);
+ COPY_TOK(COPY_VALUE);
+ vc_data.fill_rate[0] = vc_data.fill_rate[1] = data->fill_rate;
+ vc_data.interval[0] = vc_data.interval[1] = data->interval;
+ return do_set_sched(vxi, &vc_data);
+}
+
+int vc_set_sched_v4(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_set_sched_v4 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_sched_v4(vxi, &vc_data);
+}
+
+ /* latest interface is v5 */
+
+int vc_set_sched(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_sched_v5 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return do_set_sched(vxi, &vc_data);
+}
+
+
+#define COPY_PRI(C) C(prio_bias)
+#define COPY_TOK(C) C(tokens); C(tokens_min); C(tokens_max)
+#define COPY_FRI(C) C(fill_rate[0]); C(interval[0]); \
+ C(fill_rate[1]); C(interval[1]);
+
+#define COPY_VALUE(name) vc_data.name = data->name
+
+
+int vc_get_sched(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_sched_v5 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ if (vc_data.mask & VXSM_CPU_ID) {
+ int cpu = vc_data.cpu_id;
+ struct _vx_sched_pc *data;
+
+ if (!cpu_possible(cpu))
+ return -EINVAL;
+
+ data = &vx_per_cpu(vxi, sched_pc, cpu);
+ COPY_TOK(COPY_VALUE);
+ COPY_PRI(COPY_VALUE);
+ COPY_FRI(COPY_VALUE);
+
+ if (data->flags & VXSF_IDLE_TIME)
+ vc_data.mask |= VXSM_IDLE_TIME;
+ } else {
+ struct _vx_sched *data = &vxi->sched;
+
+ COPY_TOK(COPY_VALUE);
+ COPY_PRI(COPY_VALUE);
+ COPY_FRI(COPY_VALUE);
+ }
+
+ if (vc_data.mask & VXSM_MSEC) {
+ vc_data.interval[0] = ticks_to_msec(vc_data.interval[0]);
+ vc_data.interval[1] = ticks_to_msec(vc_data.interval[1]);
+ }
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
+
+int vc_sched_info(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_sched_info vc_data;
+ int cpu;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ cpu = vc_data.cpu_id;
+ if (!cpu_possible(cpu))
+ return -EINVAL;
+
+ if (vxi) {
+ struct _vx_sched_pc *sched_pc =
+ &vx_per_cpu(vxi, sched_pc, cpu);
+
+ vc_data.user_msec = ticks_to_msec(sched_pc->user_ticks);
+ vc_data.sys_msec = ticks_to_msec(sched_pc->sys_ticks);
+ vc_data.hold_msec = ticks_to_msec(sched_pc->hold_ticks);
+ vc_data.vavavoom = sched_pc->vavavoom;
+ }
+ vc_data.token_usec = ticks_to_usec(1);
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ return -EFAULT;
+ return 0;
+}
+
--- a/kernel/vserver/sched_init.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/sched_init.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,50 @@
+
+static inline void vx_info_init_sched(struct _vx_sched *sched)
+{
+ static struct lock_class_key tokens_lock_key;
+
+ /* scheduling; hard code starting values as constants */
+ sched->fill_rate[0] = 1;
+ sched->interval[0] = 4;
+ sched->fill_rate[1] = 1;
+ sched->interval[1] = 8;
+ sched->tokens = HZ >> 2;
+ sched->tokens_min = HZ >> 4;
+ sched->tokens_max = HZ >> 1;
+ sched->tokens_lock = SPIN_LOCK_UNLOCKED;
+ sched->prio_bias = 0;
+
+ lockdep_set_class(&sched->tokens_lock, &tokens_lock_key);
+}
+
+static inline
+void vx_info_init_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+ sched_pc->fill_rate[0] = 1;
+ sched_pc->interval[0] = 4;
+ sched_pc->fill_rate[1] = 1;
+ sched_pc->interval[1] = 8;
+ sched_pc->tokens = HZ >> 2;
+ sched_pc->tokens_min = HZ >> 4;
+ sched_pc->tokens_max = HZ >> 1;
+ sched_pc->prio_bias = 0;
+ sched_pc->vavavoom = 0;
+ sched_pc->token_time = 0;
+ sched_pc->idle_time = 0;
+ sched_pc->norm_time = jiffies;
+
+ sched_pc->user_ticks = 0;
+ sched_pc->sys_ticks = 0;
+ sched_pc->hold_ticks = 0;
+}
+
+static inline void vx_info_exit_sched(struct _vx_sched *sched)
+{
+ return;
+}
+
+static inline
+void vx_info_exit_sched_pc(struct _vx_sched_pc *sched_pc, int cpu)
+{
+ return;
+}
--- a/kernel/vserver/sched_proc.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/sched_proc.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,57 @@
+#ifndef _VX_SCHED_PROC_H
+#define _VX_SCHED_PROC_H
+
+
+static inline
+int vx_info_proc_sched(struct _vx_sched *sched, char *buffer)
+{
+ int length = 0;
+
+ length += sprintf(buffer,
+ "FillRate:\t%8d,%d\n"
+ "Interval:\t%8d,%d\n"
+ "TokensMin:\t%8d\n"
+ "TokensMax:\t%8d\n"
+ "PrioBias:\t%8d\n",
+ sched->fill_rate[0],
+ sched->fill_rate[1],
+ sched->interval[0],
+ sched->interval[1],
+ sched->tokens_min,
+ sched->tokens_max,
+ sched->prio_bias);
+ return length;
+}
+
+static inline
+int vx_info_proc_sched_pc(struct _vx_sched_pc *sched_pc,
+ char *buffer, int cpu)
+{
+ int length = 0;
+
+ length += sprintf(buffer + length,
+ "cpu %d: %lld %lld %lld %ld %ld", cpu,
+ (unsigned long long)sched_pc->user_ticks,
+ (unsigned long long)sched_pc->sys_ticks,
+ (unsigned long long)sched_pc->hold_ticks,
+ sched_pc->token_time,
+ sched_pc->idle_time);
+ length += sprintf(buffer + length,
+ " %c%c %d %d %d %d/%d %d/%d",
+ (sched_pc->flags & VXSF_ONHOLD) ? 'H' : 'R',
+ (sched_pc->flags & VXSF_IDLE_TIME) ? 'I' : '-',
+ sched_pc->tokens,
+ sched_pc->tokens_min,
+ sched_pc->tokens_max,
+ sched_pc->fill_rate[0],
+ sched_pc->interval[0],
+ sched_pc->fill_rate[1],
+ sched_pc->interval[1]);
+ length += sprintf(buffer + length,
+ " %d %d\n",
+ sched_pc->prio_bias,
+ sched_pc->vavavoom);
+ return length;
+}
+
+#endif /* _VX_SCHED_PROC_H */
--- a/kernel/vserver/signal.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/signal.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,132 @@
+/*
+ * linux/kernel/vserver/signal.c
+ *
+ * Virtual Server: Signal Support
+ *
+ * Copyright (C) 2003-2007 Herbert Pötzl
+ *
+ * V0.01 broken out from vcontext V0.05
+ * V0.02 changed vcmds to vxi arg
+ * V0.03 adjusted siginfo for kill
+ *
+ */
+
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vs_pid.h>
+#include <linux/vserver/signal_cmd.h>
+
+
+int vx_info_kill(struct vx_info *vxi, int pid, int sig)
+{
+ int retval, count = 0;
+ struct task_struct *p;
+ struct siginfo *sip = SEND_SIG_PRIV;
+
+ retval = -ESRCH;
+ vxdprintk(VXD_CBIT(misc, 4),
+ "vx_info_kill(%p[#%d],%d,%d)*",
+ vxi, vxi->vx_id, pid, sig);
+ read_lock(&tasklist_lock);
+ switch (pid) {
+ case 0:
+ case -1:
+ for_each_process(p) {
+ int err = 0;
+
+ if (vx_task_xid(p) != vxi->vx_id || p->pid <= 1 ||
+ (pid && vxi->vx_initpid == p->pid))
+ continue;
+
+ err = group_send_sig_info(sig, sip, p);
+ ++count;
+ if (err != -EPERM)
+ retval = err;
+ }
+ break;
+
+ case 1:
+ if (vxi->vx_initpid) {
+ pid = vxi->vx_initpid;
+ /* for now, only SIGINT to private init ... */
+ if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+ /* ... as long as there are tasks left */
+ (atomic_read(&vxi->vx_tasks) > 1))
+ sig = SIGINT;
+ }
+ /* fallthrough */
+ default:
+ p = find_task_by_real_pid(pid);
+ if (p) {
+ if (vx_task_xid(p) == vxi->vx_id)
+ retval = group_send_sig_info(sig, sip, p);
+ }
+ break;
+ }
+ read_unlock(&tasklist_lock);
+ vxdprintk(VXD_CBIT(misc, 4),
+ "vx_info_kill(%p[#%d],%d,%d,%ld) = %d",
+ vxi, vxi->vx_id, pid, sig, (long)sip, retval);
+ return retval;
+}
+
+int vc_ctx_kill(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_ctx_kill_v0 vc_data;
+
+ if (copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ /* special check to allow guest shutdown */
+ if (!vx_info_flags(vxi, VXF_STATE_ADMIN, 0) &&
+ /* forbid killall pid=0 when init is present */
+ (((vc_data.pid < 1) && vxi->vx_initpid) ||
+ (vc_data.pid > 1)))
+ return -EACCES;
+
+ return vx_info_kill(vxi, vc_data.pid, vc_data.sig);
+}
+
+
+static int __wait_exit(struct vx_info *vxi)
+{
+ DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
+
+ add_wait_queue(&vxi->vx_wait, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+wait:
+ if (vx_info_state(vxi,
+ VXS_SHUTDOWN | VXS_HASHED | VXS_HELPER) == VXS_SHUTDOWN)
+ goto out;
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+ schedule();
+ goto wait;
+
+out:
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&vxi->vx_wait, &wait);
+ return ret;
+}
+
+
+
+int vc_wait_exit(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_wait_exit_v0 vc_data;
+ int ret;
+
+ ret = __wait_exit(vxi);
+ vc_data.reboot_cmd = vxi->reboot_cmd;
+ vc_data.exit_code = vxi->exit_code;
+
+ if (copy_to_user(data, &vc_data, sizeof(vc_data)))
+ ret = -EFAULT;
+ return ret;
+}
+
--- a/kernel/vserver/space.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/space.c 2008-05-15 15:41:12.000000000 -0400
@@ -0,0 +1,316 @@
+/*
+ * linux/kernel/vserver/space.c
+ *
+ * Virtual Server: Context Space Support
+ *
+ * Copyright (C) 2003-2007 Herbert Pötzl
+ *
+ * V0.01 broken out from context.c 0.07
+ * V0.02 added task locking for namespace
+ * V0.03 broken out vx_enter_namespace
+ * V0.04 added *space support and commands
+ *
+ */
+
+#include <linux/utsname.h>
+#include <linux/nsproxy.h>
+#include <linux/err.h>
+#include <asm/uaccess.h>
+
+#include <linux/vs_context.h>
+#include <linux/vserver/space.h>
+#include <linux/vserver/space_cmd.h>
+
+atomic_t vs_global_nsproxy = ATOMIC_INIT(0);
+atomic_t vs_global_fs = ATOMIC_INIT(0);
+atomic_t vs_global_mnt_ns = ATOMIC_INIT(0);
+atomic_t vs_global_uts_ns = ATOMIC_INIT(0);
+atomic_t vs_global_ipc_ns = ATOMIC_INIT(0);
+atomic_t vs_global_user_ns = ATOMIC_INIT(0);
+atomic_t vs_global_pid_ns = ATOMIC_INIT(0);
+
+
+/* namespace functions */
+
+#include <linux/mnt_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/pid_namespace.h>
+#include <linux/ipc_namespace.h>
+#include <net/net_namespace.h>
+
+const struct vcmd_space_mask space_mask = {
+ .mask = CLONE_NEWNS |
+ CLONE_NEWUTS |
+ CLONE_NEWIPC |
+ CLONE_NEWUSER |
+ CLONE_FS
+};
+
+
+/*
+ * build a new nsproxy mix
+ * assumes that both proxies are 'const'
+ * does not touch nsproxy refcounts
+ * will hold a reference on the result.
+ */
+
+struct nsproxy *vs_mix_nsproxy(struct nsproxy *old_nsproxy,
+ struct nsproxy *new_nsproxy, unsigned long mask)
+{
+ struct mnt_namespace *old_ns;
+ struct uts_namespace *old_uts;
+ struct ipc_namespace *old_ipc;
+ struct pid_namespace *old_pid;
+ struct user_namespace *old_user;
+ struct net *old_net;
+ struct nsproxy *nsproxy;
+
+ nsproxy = copy_nsproxy(old_nsproxy);
+ if (!nsproxy)
+ goto out;
+
+ if (mask & CLONE_NEWNS) {
+ old_ns = nsproxy->mnt_ns;
+ nsproxy->mnt_ns = new_nsproxy->mnt_ns;
+ if (nsproxy->mnt_ns)
+ get_mnt_ns(nsproxy->mnt_ns);
+ } else
+ old_ns = NULL;
+
+ if (mask & CLONE_NEWUTS) {
+ old_uts = nsproxy->uts_ns;
+ nsproxy->uts_ns = new_nsproxy->uts_ns;
+ if (nsproxy->uts_ns)
+ get_uts_ns(nsproxy->uts_ns);
+ } else
+ old_uts = NULL;
+
+ if (mask & CLONE_NEWIPC) {
+ old_ipc = nsproxy->ipc_ns;
+ nsproxy->ipc_ns = new_nsproxy->ipc_ns;
+ if (nsproxy->ipc_ns)
+ get_ipc_ns(nsproxy->ipc_ns);
+ } else
+ old_ipc = NULL;
+
+ if (mask & CLONE_NEWUSER) {
+ old_user = nsproxy->user_ns;
+ nsproxy->user_ns = new_nsproxy->user_ns;
+ if (nsproxy->user_ns)
+ get_user_ns(nsproxy->user_ns);
+ } else
+ old_user = NULL;
+
+ if (mask & CLONE_NEWPID) {
+ old_pid = nsproxy->pid_ns;
+ nsproxy->pid_ns = new_nsproxy->pid_ns;
+ if (nsproxy->pid_ns)
+ get_pid_ns(nsproxy->pid_ns);
+ } else
+ old_pid = NULL;
+
+ if (mask & CLONE_NEWNET) {
+ old_net = nsproxy->net_ns;
+ nsproxy->net_ns = new_nsproxy->net_ns;
+ if (nsproxy->net_ns)
+ get_net(nsproxy->net_ns);
+ } else
+ old_net = NULL;
+
+ if (old_ns)
+ put_mnt_ns(old_ns);
+ if (old_uts)
+ put_uts_ns(old_uts);
+ if (old_ipc)
+ put_ipc_ns(old_ipc);
+ if (old_pid)
+ put_pid_ns(old_pid);
+ if (old_user)
+ put_user_ns(old_user);
+ if (old_net)
+ put_net(old_net);
+out:
+ return nsproxy;
+}
+
+
+/*
+ * merge two nsproxy structs into a new one.
+ * will hold a reference on the result.
+ */
+
+static inline
+struct nsproxy *__vs_merge_nsproxy(struct nsproxy *old,
+ struct nsproxy *proxy, unsigned long mask)
+{
+ struct nsproxy null_proxy = { .mnt_ns = NULL };
+
+ if (!proxy)
+ return NULL;
+
+ if (mask) {
+ /* vs_mix_nsproxy returns with reference */
+ return vs_mix_nsproxy(old ? old : &null_proxy,
+ proxy, mask);
+ }
+ get_nsproxy(proxy);
+ return proxy;
+}
+
+/*
+ * merge two fs structs into a new one.
+ * will take a reference on the result.
+ */
+
+static inline
+struct fs_struct *__vs_merge_fs(struct fs_struct *old,
+ struct fs_struct *fs, unsigned long mask)
+{
+ if (!(mask & CLONE_FS)) {
+ if (old)
+ atomic_inc(&old->count);
+ return old;
+ }
+
+ if (!fs)
+ return NULL;
+
+ return copy_fs_struct(fs);
+}
+
+
+int vx_enter_space(struct vx_info *vxi, unsigned long mask)
+{
+ struct nsproxy *proxy, *proxy_cur, *proxy_new;
+ struct fs_struct *fs, *fs_cur, *fs_new;
+ int ret;
+
+ if (vx_info_flags(vxi, VXF_INFO_PRIVATE, 0))
+ return -EACCES;
+
+ if (!mask)
+ mask = vxi->vx_nsmask;
+
+ if ((mask & vxi->vx_nsmask) != mask)
+ return -EINVAL;
+
+ proxy = vxi->vx_nsproxy;
+ fs = vxi->vx_fs;
+
+ task_lock(current);
+ fs_cur = current->fs;
+ atomic_inc(&fs_cur->count);
+ proxy_cur = current->nsproxy;
+ get_nsproxy(proxy_cur);
+ task_unlock(current);
+
+ fs_new = __vs_merge_fs(fs_cur, fs, mask);
+ if (IS_ERR(fs_new)) {
+ ret = PTR_ERR(fs_new);
+ goto out_put;
+ }
+
+ proxy_new = __vs_merge_nsproxy(proxy_cur, proxy, mask);
+ if (IS_ERR(proxy_new)) {
+ ret = PTR_ERR(proxy_new);
+ goto out_put_fs;
+ }
+
+ fs_new = xchg(&current->fs, fs_new);
+ proxy_new = xchg(&current->nsproxy, proxy_new);
+ ret = 0;
+
+ if (proxy_new)
+ put_nsproxy(proxy_new);
+out_put_fs:
+ if (fs_new)
+ put_fs_struct(fs_new);
+out_put:
+ if (proxy_cur)
+ put_nsproxy(proxy_cur);
+ if (fs_cur)
+ put_fs_struct(fs_cur);
+ return ret;
+}
+
+
+int vx_set_space(struct vx_info *vxi, unsigned long mask)
+{
+ struct nsproxy *proxy_vxi, *proxy_cur, *proxy_new;
+ struct fs_struct *fs_vxi, *fs_cur, *fs_new;
+ int ret;
+
+ if (!mask)
+ mask = space_mask.mask;
+
+ if ((mask & space_mask.mask) != mask)
+ return -EINVAL;
+
+ proxy_vxi = vxi->vx_nsproxy;
+ fs_vxi = vxi->vx_fs;
+
+ task_lock(current);
+ fs_cur = current->fs;
+ atomic_inc(&fs_cur->count);
+ proxy_cur = current->nsproxy;
+ get_nsproxy(proxy_cur);
+ task_unlock(current);
+
+ fs_new = __vs_merge_fs(fs_vxi, fs_cur, mask);
+ if (IS_ERR(fs_new)) {
+ ret = PTR_ERR(fs_new);
+ goto out_put;
+ }
+
+ proxy_new = __vs_merge_nsproxy(proxy_vxi, proxy_cur, mask);
+ if (IS_ERR(proxy_new)) {
+ ret = PTR_ERR(proxy_new);
+ goto out_put_fs;
+ }
+
+ fs_new = xchg(&vxi->vx_fs, fs_new);
+ proxy_new = xchg(&vxi->vx_nsproxy, proxy_new);
+ vxi->vx_nsmask |= mask;
+ ret = 0;
+
+ if (proxy_new)
+ put_nsproxy(proxy_new);
+out_put_fs:
+ if (fs_new)
+ put_fs_struct(fs_new);
+out_put:
+ if (proxy_cur)
+ put_nsproxy(proxy_cur);
+ if (fs_cur)
+ put_fs_struct(fs_cur);
+ return ret;
+}
+
+
+int vc_enter_space(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_space_mask vc_data = { .mask = 0 };
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return vx_enter_space(vxi, vc_data.mask);
+}
+
+int vc_set_space(struct vx_info *vxi, void __user *data)
+{
+ struct vcmd_space_mask vc_data = { .mask = 0 };
+
+ if (data && copy_from_user(&vc_data, data, sizeof(vc_data)))
+ return -EFAULT;
+
+ return vx_set_space(vxi, vc_data.mask);
+}
+
+int vc_get_space_mask(struct vx_info *vxi, void __user *data)
+{
+ if (copy_to_user(data, &space_mask, sizeof(space_mask)))
+ return -EFAULT;
+ return 0;
+}
+
--- a/kernel/vserver/switch.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/switch.c 2008-04-29 18:40:18.000000000 -0400
@@ -0,0 +1,529 @@
+/*
+ * linux/kernel/vserver/switch.c
+ *
+ * Virtual Server: Syscall Switch
+ *
+ * Copyright (C) 2003-2007 Herbert Pötzl
+ *
+ * V0.01 syscall switch
+ * V0.02 added signal to context
+ * V0.03 added rlimit functions
+ * V0.04 added iattr, task/xid functions
+ * V0.05 added debug/history stuff
+ * V0.06 added compat32 layer
+ * V0.07 vcmd args and perms
+ * V0.08 added status commands
+ * V0.09 added tag commands
+ * V0.10 added oom bias
+ * V0.11 added device commands
+ *
+ */
+
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vserver/switch.h>
+
+#include "vci_config.h"
+
+
+static inline
+int vc_get_version(uint32_t id)
+{
+ return VCI_VERSION;
+}
+
+static inline
+int vc_get_vci(uint32_t id)
+{
+ return vci_kernel_config();
+}
+
+#include <linux/vserver/context_cmd.h>
+#include <linux/vserver/cvirt_cmd.h>
+#include <linux/vserver/cacct_cmd.h>
+#include <linux/vserver/limit_cmd.h>
+#include <linux/vserver/network_cmd.h>
+#include <linux/vserver/sched_cmd.h>
+#include <linux/vserver/debug_cmd.h>
+#include <linux/vserver/inode_cmd.h>
+#include <linux/vserver/dlimit_cmd.h>
+#include <linux/vserver/signal_cmd.h>
+#include <linux/vserver/space_cmd.h>
+#include <linux/vserver/tag_cmd.h>
+#include <linux/vserver/device_cmd.h>
+
+#include <linux/vserver/inode.h>
+#include <linux/vserver/dlimit.h>
+
+
+#ifdef CONFIG_COMPAT
+#define __COMPAT(name, id, data, compat) \
+ (compat) ? name ## _x32(id, data) : name(id, data)
+#define __COMPAT_NO_ID(name, data, compat) \
+ (compat) ? name ## _x32(data) : name(data)
+#else
+#define __COMPAT(name, id, data, compat) \
+ name(id, data)
+#define __COMPAT_NO_ID(name, data, compat) \
+ name(data)
+#endif
+
+
+static inline
+long do_vcmd(uint32_t cmd, uint32_t id,
+ struct vx_info *vxi, struct nx_info *nxi,
+ void __user *data, int compat)
+{
+ switch (cmd) {
+
+ case VCMD_get_version:
+ return vc_get_version(id);
+ case VCMD_get_vci:
+ return vc_get_vci(id);
+
+ case VCMD_task_xid:
+ return vc_task_xid(id);
+ case VCMD_vx_info:
+ return vc_vx_info(vxi, data);
+
+ case VCMD_task_nid:
+ return vc_task_nid(id);
+ case VCMD_nx_info:
+ return vc_nx_info(nxi, data);
+
+ case VCMD_task_tag:
+ return vc_task_tag(id);
+
+ /* this is version 1 */
+ case VCMD_set_space:
+ return vc_set_space(vxi, data);
+
+ case VCMD_get_space_mask:
+ return vc_get_space_mask(vxi, data);
+
+#ifdef CONFIG_IA32_EMULATION
+ case VCMD_get_rlimit:
+ return __COMPAT(vc_get_rlimit, vxi, data, compat);
+ case VCMD_set_rlimit:
+ return __COMPAT(vc_set_rlimit, vxi, data, compat);
+#else
+ case VCMD_get_rlimit:
+ return vc_get_rlimit(vxi, data);
+ case VCMD_set_rlimit:
+ return vc_set_rlimit(vxi, data);
+#endif
+ case VCMD_get_rlimit_mask:
+ return vc_get_rlimit_mask(id, data);
+ case VCMD_reset_minmax:
+ return vc_reset_minmax(vxi, data);
+
+ case VCMD_get_vhi_name:
+ return vc_get_vhi_name(vxi, data);
+ case VCMD_set_vhi_name:
+ return vc_set_vhi_name(vxi, data);
+
+ case VCMD_ctx_stat:
+ return vc_ctx_stat(vxi, data);
+ case VCMD_virt_stat:
+ return vc_virt_stat(vxi, data);
+ case VCMD_sock_stat:
+ return vc_sock_stat(vxi, data);
+ case VCMD_rlimit_stat:
+ return vc_rlimit_stat(vxi, data);
+
+ case VCMD_set_cflags:
+ return vc_set_cflags(vxi, data);
+ case VCMD_get_cflags:
+ return vc_get_cflags(vxi, data);
+
+ /* this is version 1 */
+ case VCMD_set_ccaps:
+ return vc_set_ccaps(vxi, data);
+ /* this is version 1 */
+ case VCMD_get_ccaps:
+ return vc_get_ccaps(vxi, data);
+ case VCMD_set_bcaps:
+ return vc_set_bcaps(vxi, data);
+ case VCMD_get_bcaps:
+ return vc_get_bcaps(vxi, data);
+
+ case VCMD_set_badness:
+ return vc_set_badness(vxi, data);
+ case VCMD_get_badness:
+ return vc_get_badness(vxi, data);
+
+ case VCMD_set_nflags:
+ return vc_set_nflags(nxi, data);
+ case VCMD_get_nflags:
+ return vc_get_nflags(nxi, data);
+
+ case VCMD_set_ncaps:
+ return vc_set_ncaps(nxi, data);
+ case VCMD_get_ncaps:
+ return vc_get_ncaps(nxi, data);
+
+ case VCMD_set_sched_v4:
+ return vc_set_sched_v4(vxi, data);
+ /* this is version 5 */
+ case VCMD_set_sched:
+ return vc_set_sched(vxi, data);
+ case VCMD_get_sched:
+ return vc_get_sched(vxi, data);
+ case VCMD_sched_info:
+ return vc_sched_info(vxi, data);
+
+ case VCMD_add_dlimit:
+ return __COMPAT(vc_add_dlimit, id, data, compat);
+ case VCMD_rem_dlimit:
+ return __COMPAT(vc_rem_dlimit, id, data, compat);
+ case VCMD_set_dlimit:
+ return __COMPAT(vc_set_dlimit, id, data, compat);
+ case VCMD_get_dlimit:
+ return __COMPAT(vc_get_dlimit, id, data, compat);
+
+ case VCMD_ctx_kill:
+ return vc_ctx_kill(vxi, data);
+
+ case VCMD_wait_exit:
+ return vc_wait_exit(vxi, data);
+
+ case VCMD_get_iattr:
+ return __COMPAT_NO_ID(vc_get_iattr, data, compat);
+ case VCMD_set_iattr:
+ return __COMPAT_NO_ID(vc_set_iattr, data, compat);
+
+ case VCMD_fget_iattr:
+ return vc_fget_iattr(id, data);
+ case VCMD_fset_iattr:
+ return vc_fset_iattr(id, data);
+
+ case VCMD_enter_space_v0:
+ return vc_enter_space(vxi, NULL);
+ /* this is version 1 */
+ case VCMD_enter_space:
+ return vc_enter_space(vxi, data);
+
+ case VCMD_ctx_create_v0:
+ return vc_ctx_create(id, NULL);
+ case VCMD_ctx_create:
+ return vc_ctx_create(id, data);
+ case VCMD_ctx_migrate_v0:
+ return vc_ctx_migrate(vxi, NULL);
+ case VCMD_ctx_migrate:
+ return vc_ctx_migrate(vxi, data);
+
+ case VCMD_net_create_v0:
+ return vc_net_create(id, NULL);
+ case VCMD_net_create:
+ return vc_net_create(id, data);
+ case VCMD_net_migrate:
+ return vc_net_migrate(nxi, data);
+
+ case VCMD_tag_migrate:
+ return vc_tag_migrate(id);
+
+ case VCMD_net_add:
+ return vc_net_add(nxi, data);
+ case VCMD_net_remove:
+ return vc_net_remove(nxi, data);
+
+ case VCMD_net_add_ipv4:
+ return vc_net_add_ipv4(nxi, data);
+ case VCMD_net_remove_ipv4:
+ return vc_net_remove_ipv4(nxi, data);
+#ifdef CONFIG_IPV6
+ case VCMD_net_add_ipv6:
+ return vc_net_add_ipv6(nxi, data);
+ case VCMD_net_remove_ipv6:
+ return vc_net_remove_ipv6(nxi, data);
+#endif
+/* case VCMD_add_match_ipv4:
+ return vc_add_match_ipv4(nxi, data);
+ case VCMD_get_match_ipv4:
+ return vc_get_match_ipv4(nxi, data);
+#ifdef CONFIG_IPV6
+ case VCMD_add_match_ipv6:
+ return vc_add_match_ipv6(nxi, data);
+ case VCMD_get_match_ipv6:
+ return vc_get_match_ipv6(nxi, data);
+#endif */
+
+#ifdef CONFIG_VSERVER_DEVICE
+ case VCMD_set_mapping:
+ return __COMPAT(vc_set_mapping, vxi, data, compat);
+ case VCMD_unset_mapping:
+ return __COMPAT(vc_unset_mapping, vxi, data, compat);
+#endif
+#ifdef CONFIG_VSERVER_HISTORY
+ case VCMD_dump_history:
+ return vc_dump_history(id);
+ case VCMD_read_history:
+ return __COMPAT(vc_read_history, id, data, compat);
+#endif
+#ifdef CONFIG_VSERVER_MONITOR
+ case VCMD_read_monitor:
+ return __COMPAT(vc_read_monitor, id, data, compat);
+#endif
+ default:
+ vxwprintk_task(1, "unimplemented VCMD_%02d_%d[%d]",
+ VC_CATEGORY(cmd), VC_COMMAND(cmd), VC_VERSION(cmd));
+ }
+ return -ENOSYS;
+}
+
+
+#define __VCMD(vcmd, _perm, _args, _flags) \
+ case VCMD_ ## vcmd: perm = _perm; \
+ args = _args; flags = _flags; break
+
+
+#define VCA_NONE 0x00
+#define VCA_VXI 0x01
+#define VCA_NXI 0x02
+
+#define VCF_NONE 0x00
+#define VCF_INFO 0x01
+#define VCF_ADMIN 0x02
+#define VCF_ARES 0x06 /* includes admin */
+#define VCF_SETUP 0x08
+
+#define VCF_ZIDOK 0x10 /* zero id okay */
+
+
+static inline
+long do_vserver(uint32_t cmd, uint32_t id, void __user *data, int compat)
+{
+ long ret;
+ int permit = -1, state = 0;
+ int perm = -1, args = 0, flags = 0;
+ struct vx_info *vxi = NULL;
+ struct nx_info *nxi = NULL;
+
+ switch (cmd) {
+ /* unpriviledged commands */
+ __VCMD(get_version, 0, VCA_NONE, 0);
+ __VCMD(get_vci, 0, VCA_NONE, 0);
+ __VCMD(get_rlimit_mask, 0, VCA_NONE, 0);
+ __VCMD(get_space_mask, 0, VCA_NONE, 0);
+
+ /* info commands */
+ __VCMD(task_xid, 2, VCA_NONE, 0);
+ __VCMD(reset_minmax, 2, VCA_VXI, 0);
+ __VCMD(vx_info, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_bcaps, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_ccaps, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_cflags, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_badness, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_vhi_name, 3, VCA_VXI, VCF_INFO);
+ __VCMD(get_rlimit, 3, VCA_VXI, VCF_INFO);
+
+ __VCMD(ctx_stat, 3, VCA_VXI, VCF_INFO);
+ __VCMD(virt_stat, 3, VCA_VXI, VCF_INFO);
+ __VCMD(sock_stat, 3, VCA_VXI, VCF_INFO);
+ __VCMD(rlimit_stat, 3, VCA_VXI, VCF_INFO);
+
+ __VCMD(task_nid, 2, VCA_NONE, 0);
+ __VCMD(nx_info, 3, VCA_NXI, VCF_INFO);
+ __VCMD(get_ncaps, 3, VCA_NXI, VCF_INFO);
+ __VCMD(get_nflags, 3, VCA_NXI, VCF_INFO);
+
+ __VCMD(task_tag, 2, VCA_NONE, 0);
+
+ __VCMD(get_iattr, 2, VCA_NONE, 0);
+ __VCMD(fget_iattr, 2, VCA_NONE, 0);
+ __VCMD(get_dlimit, 3, VCA_NONE, VCF_INFO);
+ __VCMD(get_sched, 3, VCA_VXI, VCF_INFO);
+ __VCMD(sched_info, 3, VCA_VXI, VCF_INFO | VCF_ZIDOK);
+
+ /* lower admin commands */
+ __VCMD(wait_exit, 4, VCA_VXI, VCF_INFO);
+ __VCMD(ctx_create_v0, 5, VCA_NONE, 0);
+ __VCMD(ctx_create, 5, VCA_NONE, 0);
+ __VCMD(ctx_migrate_v0, 5, VCA_VXI, VCF_ADMIN);
+ __VCMD(ctx_migrate, 5, VCA_VXI, VCF_ADMIN);
+ __VCMD(enter_space_v0, 5, VCA_VXI, VCF_ADMIN);
+ __VCMD(enter_space, 5, VCA_VXI, VCF_ADMIN);
+
+ __VCMD(net_create_v0, 5, VCA_NONE, 0);
+ __VCMD(net_create, 5, VCA_NONE, 0);
+ __VCMD(net_migrate, 5, VCA_NXI, VCF_ADMIN);
+
+ __VCMD(tag_migrate, 5, VCA_NONE, VCF_ADMIN);
+
+ /* higher admin commands */
+ __VCMD(ctx_kill, 6, VCA_VXI, VCF_ARES);
+ __VCMD(set_space, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+
+ __VCMD(set_ccaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_bcaps, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_cflags, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_badness, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+
+ __VCMD(set_vhi_name, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_rlimit, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_sched, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_sched_v4, 7, VCA_VXI, VCF_ARES | VCF_SETUP);
+
+ __VCMD(set_ncaps, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(set_nflags, 7, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(net_add, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(net_remove, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(net_add_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(net_remove_ipv4, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+#ifdef CONFIG_IPV6
+ __VCMD(net_add_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+ __VCMD(net_remove_ipv6, 8, VCA_NXI, VCF_ARES | VCF_SETUP);
+#endif
+ __VCMD(set_iattr, 7, VCA_NONE, 0);
+ __VCMD(fset_iattr, 7, VCA_NONE, 0);
+ __VCMD(set_dlimit, 7, VCA_NONE, VCF_ARES);
+ __VCMD(add_dlimit, 8, VCA_NONE, VCF_ARES);
+ __VCMD(rem_dlimit, 8, VCA_NONE, VCF_ARES);
+
+#ifdef CONFIG_VSERVER_DEVICE
+ __VCMD(set_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK);
+ __VCMD(unset_mapping, 8, VCA_VXI, VCF_ARES|VCF_ZIDOK);
+#endif
+ /* debug level admin commands */
+#ifdef CONFIG_VSERVER_HISTORY
+ __VCMD(dump_history, 9, VCA_NONE, 0);
+ __VCMD(read_history, 9, VCA_NONE, 0);
+#endif
+#ifdef CONFIG_VSERVER_MONITOR
+ __VCMD(read_monitor, 9, VCA_NONE, 0);
+#endif
+
+ default:
+ perm = -1;
+ }
+
+ vxdprintk(VXD_CBIT(switch, 0),
+ "vc: VCMD_%02d_%d[%d], %d,%p [%d,%d,%x,%x]",
+ VC_CATEGORY(cmd), VC_COMMAND(cmd),
+ VC_VERSION(cmd), id, data, compat,
+ perm, args, flags);
+
+ ret = -ENOSYS;
+ if (perm < 0)
+ goto out;
+
+ state = 1;
+ if (!capable(CAP_CONTEXT))
+ goto out;
+
+ state = 2;
+ /* moved here from the individual commands */
+ ret = -EPERM;
+ if ((perm > 1) && !capable(CAP_SYS_ADMIN))
+ goto out;
+
+ state = 3;
+ /* vcmd involves resource management */
+ ret = -EPERM;
+ if ((flags & VCF_ARES) && !capable(CAP_SYS_RESOURCE))
+ goto out;
+
+ state = 4;
+ /* various legacy exceptions */
+ switch (cmd) {
+ /* will go away when spectator is a cap */
+ case VCMD_ctx_migrate_v0:
+ case VCMD_ctx_migrate:
+ if (id == 1) {
+ current->xid = 1;
+ ret = 1;
+ goto out;
+ }
+ break;
+
+ /* will go away when spectator is a cap */
+ case VCMD_net_migrate:
+ if (id == 1) {
+ current->nid = 1;
+ ret = 1;
+ goto out;
+ }
+ break;
+ }
+
+ /* vcmds are fine by default */
+ permit = 1;
+
+ /* admin type vcmds require admin ... */
+ if (flags & VCF_ADMIN)
+ permit = vx_check(0, VS_ADMIN) ? 1 : 0;
+
+ /* ... but setup type vcmds override that */
+ if (!permit && (flags & VCF_SETUP))
+ permit = vx_flags(VXF_STATE_SETUP, 0) ? 2 : 0;
+
+ state = 5;
+ ret = -EPERM;
+ if (!permit)
+ goto out;
+
+ state = 6;
+ if (!id && (flags & VCF_ZIDOK))
+ goto skip_id;
+
+ ret = -ESRCH;
+ if (args & VCA_VXI) {
+ vxi = lookup_vx_info(id);
+ if (!vxi)
+ goto out;
+
+ if ((flags & VCF_ADMIN) &&
+ /* special case kill for shutdown */
+ (cmd != VCMD_ctx_kill) &&
+ /* can context be administrated? */
+ !vx_info_flags(vxi, VXF_STATE_ADMIN, 0)) {
+ ret = -EACCES;
+ goto out_vxi;
+ }
+ }
+ state = 7;
+ if (args & VCA_NXI) {
+ nxi = lookup_nx_info(id);
+ if (!nxi)
+ goto out_vxi;
+
+ if ((flags & VCF_ADMIN) &&
+ /* can context be administrated? */
+ !nx_info_flags(nxi, NXF_STATE_ADMIN, 0)) {
+ ret = -EACCES;
+ goto out_nxi;
+ }
+ }
+skip_id:
+ state = 8;
+ ret = do_vcmd(cmd, id, vxi, nxi, data, compat);
+
+out_nxi:
+ if ((args & VCA_NXI) && nxi)
+ put_nx_info(nxi);
+out_vxi:
+ if ((args & VCA_VXI) && vxi)
+ put_vx_info(vxi);
+out:
+ vxdprintk(VXD_CBIT(switch, 1),
+ "vc: VCMD_%02d_%d[%d] = %08lx(%ld) [%d,%d]",
+ VC_CATEGORY(cmd), VC_COMMAND(cmd),
+ VC_VERSION(cmd), ret, ret, state, permit);
+ return ret;
+}
+
+asmlinkage long
+sys_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+ return do_vserver(cmd, id, data, 0);
+}
+
+#ifdef CONFIG_COMPAT
+
+asmlinkage long
+sys32_vserver(uint32_t cmd, uint32_t id, void __user *data)
+{
+ return do_vserver(cmd, id, data, 1);
+}
+
+#endif /* CONFIG_COMPAT */
--- a/kernel/vserver/sysctl.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/sysctl.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,244 @@
+/*
+ * kernel/vserver/sysctl.c
+ *
+ * Virtual Context Support
+ *
+ * Copyright (C) 2004-2007 Herbert Pötzl
+ *
+ * V0.01 basic structure
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/parser.h>
+#include <asm/uaccess.h>
+
+
+enum {
+ CTL_DEBUG_ERROR = 0,
+ CTL_DEBUG_SWITCH = 1,
+ CTL_DEBUG_XID,
+ CTL_DEBUG_NID,
+ CTL_DEBUG_TAG,
+ CTL_DEBUG_NET,
+ CTL_DEBUG_LIMIT,
+ CTL_DEBUG_CRES,
+ CTL_DEBUG_DLIM,
+ CTL_DEBUG_QUOTA,
+ CTL_DEBUG_CVIRT,
+ CTL_DEBUG_SPACE,
+ CTL_DEBUG_MISC,
+};
+
+
+unsigned int vx_debug_switch = 0;
+unsigned int vx_debug_xid = 0;
+unsigned int vx_debug_nid = 0;
+unsigned int vx_debug_tag = 0;
+unsigned int vx_debug_net = 0;
+unsigned int vx_debug_limit = 0;
+unsigned int vx_debug_cres = 0;
+unsigned int vx_debug_dlim = 0;
+unsigned int vx_debug_quota = 0;
+unsigned int vx_debug_cvirt = 0;
+unsigned int vx_debug_space = 0;
+unsigned int vx_debug_misc = 0;
+
+
+static struct ctl_table_header *vserver_table_header;
+static ctl_table vserver_root_table[];
+
+
+void vserver_register_sysctl(void)
+{
+ if (!vserver_table_header) {
+ vserver_table_header = register_sysctl_table(vserver_root_table);
+ }
+
+}
+
+void vserver_unregister_sysctl(void)
+{
+ if (vserver_table_header) {
+ unregister_sysctl_table(vserver_table_header);
+ vserver_table_header = NULL;
+ }
+}
+
+
+static int proc_dodebug(ctl_table *table, int write,
+ struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ char tmpbuf[20], *p, c;
+ unsigned int value;
+ size_t left, len;
+
+ if ((*ppos && !write) || !*lenp) {
+ *lenp = 0;
+ return 0;
+ }
+
+ left = *lenp;
+
+ if (write) {
+ if (!access_ok(VERIFY_READ, buffer, left))
+ return -EFAULT;
+ p = (char *)buffer;
+ while (left && __get_user(c, p) >= 0 && isspace(c))
+ left--, p++;
+ if (!left)
+ goto done;
+
+ if (left > sizeof(tmpbuf) - 1)
+ return -EINVAL;
+ if (copy_from_user(tmpbuf, p, left))
+ return -EFAULT;
+ tmpbuf[left] = '\0';
+
+ for (p = tmpbuf, value = 0; '0' <= *p && *p <= '9'; p++, left--)
+ value = 10 * value + (*p - '0');
+ if (*p && !isspace(*p))
+ return -EINVAL;
+ while (left && isspace(*p))
+ left--, p++;
+ *(unsigned int *)table->data = value;
+ } else {
+ if (!access_ok(VERIFY_WRITE, buffer, left))
+ return -EFAULT;
+ len = sprintf(tmpbuf, "%d", *(unsigned int *)table->data);
+ if (len > left)
+ len = left;
+ if (__copy_to_user(buffer, tmpbuf, len))
+ return -EFAULT;
+ if ((left -= len) > 0) {
+ if (put_user('\n', (char *)buffer + len))
+ return -EFAULT;
+ left--;
+ }
+ }
+
+done:
+ *lenp -= left;
+ *ppos += *lenp;
+ return 0;
+}
+
+static int zero;
+
+#define CTL_ENTRY(ctl, name) \
+ { \
+ .ctl_name = ctl, \
+ .procname = #name, \
+ .data = &vx_ ## name, \
+ .maxlen = sizeof(int), \
+ .mode = 0644, \
+ .proc_handler = &proc_dodebug, \
+ .strategy = &sysctl_intvec, \
+ .extra1 = &zero, \
+ }
+
+static ctl_table vserver_debug_table[] = {
+ CTL_ENTRY(CTL_DEBUG_SWITCH, debug_switch),
+ CTL_ENTRY(CTL_DEBUG_XID, debug_xid),
+ CTL_ENTRY(CTL_DEBUG_NID, debug_nid),
+ CTL_ENTRY(CTL_DEBUG_TAG, debug_tag),
+ CTL_ENTRY(CTL_DEBUG_NET, debug_net),
+ CTL_ENTRY(CTL_DEBUG_LIMIT, debug_limit),
+ CTL_ENTRY(CTL_DEBUG_CRES, debug_cres),
+ CTL_ENTRY(CTL_DEBUG_DLIM, debug_dlim),
+ CTL_ENTRY(CTL_DEBUG_QUOTA, debug_quota),
+ CTL_ENTRY(CTL_DEBUG_CVIRT, debug_cvirt),
+ CTL_ENTRY(CTL_DEBUG_SPACE, debug_space),
+ CTL_ENTRY(CTL_DEBUG_MISC, debug_misc),
+ { .ctl_name = 0 }
+};
+
+static ctl_table vserver_root_table[] = {
+ {
+ .ctl_name = CTL_VSERVER,
+ .procname = "vserver",
+ .mode = 0555,
+ .child = vserver_debug_table
+ },
+ { .ctl_name = 0 }
+};
+
+
+static match_table_t tokens = {
+ { CTL_DEBUG_SWITCH, "switch=%x" },
+ { CTL_DEBUG_XID, "xid=%x" },
+ { CTL_DEBUG_NID, "nid=%x" },
+ { CTL_DEBUG_TAG, "tag=%x" },
+ { CTL_DEBUG_NET, "net=%x" },
+ { CTL_DEBUG_LIMIT, "limit=%x" },
+ { CTL_DEBUG_CRES, "cres=%x" },
+ { CTL_DEBUG_DLIM, "dlim=%x" },
+ { CTL_DEBUG_QUOTA, "quota=%x" },
+ { CTL_DEBUG_CVIRT, "cvirt=%x" },
+ { CTL_DEBUG_SPACE, "space=%x" },
+ { CTL_DEBUG_MISC, "misc=%x" },
+ { CTL_DEBUG_ERROR, NULL }
+};
+
+#define HANDLE_CASE(id, name, val) \
+ case CTL_DEBUG_ ## id: \
+ vx_debug_ ## name = val; \
+ printk("vs_debug_" #name "=0x%x\n", val); \
+ break
+
+
+static int __init vs_debug_setup(char *str)
+{
+ char *p;
+ int token;
+
+ printk("vs_debug_setup(%s)\n", str);
+ while ((p = strsep(&str, ",")) != NULL) {
+ substring_t args[MAX_OPT_ARGS];
+ unsigned int value;
+
+ if (!*p)
+ continue;
+
+ token = match_token(p, tokens, args);
+ value = (token > 0) ? simple_strtoul(args[0].from, NULL, 0) : 0;
+
+ switch (token) {
+ HANDLE_CASE(SWITCH, switch, value);
+ HANDLE_CASE(XID, xid, value);
+ HANDLE_CASE(NID, nid, value);
+ HANDLE_CASE(TAG, tag, value);
+ HANDLE_CASE(NET, net, value);
+ HANDLE_CASE(LIMIT, limit, value);
+ HANDLE_CASE(CRES, cres, value);
+ HANDLE_CASE(DLIM, dlim, value);
+ HANDLE_CASE(QUOTA, quota, value);
+ HANDLE_CASE(CVIRT, cvirt, value);
+ HANDLE_CASE(SPACE, space, value);
+ HANDLE_CASE(MISC, misc, value);
+ default:
+ return -EINVAL;
+ break;
+ }
+ }
+ return 1;
+}
+
+__setup("vsdebug=", vs_debug_setup);
+
+
+
+EXPORT_SYMBOL_GPL(vx_debug_switch);
+EXPORT_SYMBOL_GPL(vx_debug_xid);
+EXPORT_SYMBOL_GPL(vx_debug_nid);
+EXPORT_SYMBOL_GPL(vx_debug_net);
+EXPORT_SYMBOL_GPL(vx_debug_limit);
+EXPORT_SYMBOL_GPL(vx_debug_cres);
+EXPORT_SYMBOL_GPL(vx_debug_dlim);
+EXPORT_SYMBOL_GPL(vx_debug_quota);
+EXPORT_SYMBOL_GPL(vx_debug_cvirt);
+EXPORT_SYMBOL_GPL(vx_debug_space);
+EXPORT_SYMBOL_GPL(vx_debug_misc);
+
--- a/kernel/vserver/tag.c 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/tag.c 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,63 @@
+/*
+ * linux/kernel/vserver/tag.c
+ *
+ * Virtual Server: Shallow Tag Space
+ *
+ * Copyright (C) 2007 Herbert Pötzl
+ *
+ * V0.01 basic implementation
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/vserver/debug.h>
+#include <linux/vs_pid.h>
+#include <linux/vs_tag.h>
+
+#include <linux/vserver/tag_cmd.h>
+
+
+int dx_migrate_task(struct task_struct *p, tag_t tag)
+{
+ if (!p)
+ BUG();
+
+ vxdprintk(VXD_CBIT(tag, 5),
+ "dx_migrate_task(%p[#%d],#%d)", p, p->tag, tag);
+
+ task_lock(p);
+ p->tag = tag;
+ task_unlock(p);
+
+ vxdprintk(VXD_CBIT(tag, 5),
+ "moved task %p into [#%d]", p, tag);
+ return 0;
+}
+
+/* vserver syscall commands below here */
+
+/* task xid and vx_info functions */
+
+
+int vc_task_tag(uint32_t id)
+{
+ tag_t tag;
+
+ if (id) {
+ struct task_struct *tsk;
+ read_lock(&tasklist_lock);
+ tsk = find_task_by_real_pid(id);
+ tag = (tsk) ? tsk->tag : -ESRCH;
+ read_unlock(&tasklist_lock);
+ } else
+ tag = dx_current_tag();
+ return tag;
+}
+
+
+int vc_tag_migrate(uint32_t tag)
+{
+ return dx_migrate_task(current, tag & 0xFFFF);
+}
+
+
--- a/kernel/vserver/vci_config.h 1969-12-31 19:00:00.000000000 -0500
+++ a/kernel/vserver/vci_config.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,81 @@
+
+/* interface version */
+
+#define VCI_VERSION 0x00020302
+
+
+enum {
+ VCI_KCBIT_NO_DYNAMIC = 0,
+
+ VCI_KCBIT_PROC_SECURE = 4,
+ VCI_KCBIT_HARDCPU = 5,
+ VCI_KCBIT_IDLELIMIT = 6,
+ VCI_KCBIT_IDLETIME = 7,
+
+ VCI_KCBIT_COWBL = 8,
+ VCI_KCBIT_FULLCOWBL = 9,
+ VCI_KCBIT_SPACES = 10,
+ VCI_KCBIT_NETV2 = 11,
+
+ VCI_KCBIT_DEBUG = 16,
+ VCI_KCBIT_HISTORY = 20,
+ VCI_KCBIT_TAGGED = 24,
+ VCI_KCBIT_PPTAG = 28,
+
+ VCI_KCBIT_MORE = 31,
+};
+
+
+static inline uint32_t vci_kernel_config(void)
+{
+ return
+ (1 << VCI_KCBIT_NO_DYNAMIC) |
+
+ /* configured features */
+#ifdef CONFIG_VSERVER_PROC_SECURE
+ (1 << VCI_KCBIT_PROC_SECURE) |
+#endif
+#ifdef CONFIG_VSERVER_HARDCPU
+ (1 << VCI_KCBIT_HARDCPU) |
+#endif
+#ifdef CONFIG_VSERVER_IDLELIMIT
+ (1 << VCI_KCBIT_IDLELIMIT) |
+#endif
+#ifdef CONFIG_VSERVER_IDLETIME
+ (1 << VCI_KCBIT_IDLETIME) |
+#endif
+#ifdef CONFIG_VSERVER_COWBL
+ (1 << VCI_KCBIT_COWBL) |
+ (1 << VCI_KCBIT_FULLCOWBL) |
+#endif
+ (1 << VCI_KCBIT_SPACES) |
+ (1 << VCI_KCBIT_NETV2) |
+
+ /* debug options */
+#ifdef CONFIG_VSERVER_DEBUG
+ (1 << VCI_KCBIT_DEBUG) |
+#endif
+#ifdef CONFIG_VSERVER_HISTORY
+ (1 << VCI_KCBIT_HISTORY) |
+#endif
+
+ /* inode context tagging */
+#if defined(CONFIG_TAGGING_NONE)
+ (0 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_UID16)
+ (1 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_GID16)
+ (2 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_ID24)
+ (3 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_INTERN)
+ (4 << VCI_KCBIT_TAGGED) |
+#elif defined(CONFIG_TAGGING_RUNTIME)
+ (5 << VCI_KCBIT_TAGGED) |
+#else
+ (7 << VCI_KCBIT_TAGGED) |
+#endif
+ (1 << VCI_KCBIT_PPTAG) |
+ 0;
+}
+
--- a/mm/filemap_xip.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/filemap_xip.c 2008-04-19 15:14:52.000000000 -0400
@@ -14,6 +14,7 @@
#include <linux/uio.h>
#include <linux/rmap.h>
#include <linux/sched.h>
+#include <linux/vs_memory.h>
#include <asm/tlbflush.h>
/*
--- a/mm/fremap.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/fremap.c 2008-04-19 15:14:52.000000000 -0400
@@ -15,6 +15,7 @@
#include <linux/rmap.h>
#include <linux/module.h>
#include <linux/syscalls.h>
+#include <linux/vs_memory.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
--- a/mm/hugetlb.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/hugetlb.c 2008-04-19 15:14:52.000000000 -0400
@@ -19,6 +19,7 @@
#include <asm/pgtable.h>
#include <linux/hugetlb.h>
+#include <linux/vs_memory.h>
#include "internal.h"
const unsigned long hugetlb_zero = 0, hugetlb_infinity = ~0UL;
--- a/mm/memory.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/memory.c 2008-04-19 15:14:52.000000000 -0400
@@ -505,6 +505,9 @@ static int copy_pte_range(struct mm_stru
int progress = 0;
int rss[2];
+ if (!vx_rss_avail(dst_mm, ((end - addr)/PAGE_SIZE + 1)))
+ return -ENOMEM;
+
again:
rss[1] = rss[0] = 0;
dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
@@ -2058,6 +2061,11 @@ static int do_swap_page(struct mm_struct
goto out;
}
+ if (!vx_rss_avail(mm, 1)) {
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
+
mark_page_accessed(page);
lock_page(page);
delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
@@ -2129,6 +2137,8 @@ static int do_anonymous_page(struct mm_s
/* Allocate our own private page. */
pte_unmap(page_table);
+ if (!vx_rss_avail(mm, 1))
+ goto oom;
if (unlikely(anon_vma_prepare(vma)))
goto oom;
page = alloc_zeroed_user_highpage_movable(vma, address);
@@ -2453,6 +2463,7 @@ static inline int handle_pte_fault(struc
{
pte_t entry;
spinlock_t *ptl;
+ int ret = 0, type = VXPT_UNKNOWN;
entry = *pte;
if (!pte_present(entry)) {
@@ -2480,9 +2491,12 @@ static inline int handle_pte_fault(struc
if (unlikely(!pte_same(*pte, entry)))
goto unlock;
if (write_access) {
- if (!pte_write(entry))
- return do_wp_page(mm, vma, address,
+ if (!pte_write(entry)) {
+ ret = do_wp_page(mm, vma, address,
pte, pmd, ptl, entry);
+ type = VXPT_WRITE;
+ goto out;
+ }
entry = pte_mkdirty(entry);
}
entry = pte_mkyoung(entry);
@@ -2500,7 +2514,10 @@ static inline int handle_pte_fault(struc
}
unlock:
pte_unmap_unlock(pte, ptl);
- return 0;
+ ret = 0;
+out:
+ vx_page_fault(mm, vma, type, ret);
+ return ret;
}
/*
--- a/mm/mlock.c 2008-04-17 10:37:25.000000000 -0400
+++ a/mm/mlock.c 2008-04-19 15:14:52.000000000 -0400
@@ -12,6 +12,7 @@
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
+#include <linux/vs_memory.h>
int can_do_mlock(void)
{
@@ -76,7 +77,7 @@ success:
ret = make_pages_present(start, end);
}
- mm->locked_vm -= pages;
+ vx_vmlocked_sub(mm, pages);
out:
if (ret == -ENOMEM)
ret = -EAGAIN;
@@ -134,7 +135,7 @@ static int do_mlock(unsigned long start,
asmlinkage long sys_mlock(unsigned long start, size_t len)
{
- unsigned long locked;
+ unsigned long locked, grow;
unsigned long lock_limit;
int error = -ENOMEM;
@@ -145,8 +146,10 @@ asmlinkage long sys_mlock(unsigned long
len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
start &= PAGE_MASK;
- locked = len >> PAGE_SHIFT;
- locked += current->mm->locked_vm;
+ grow = len >> PAGE_SHIFT;
+ if (!vx_vmlocked_avail(current->mm, grow))
+ goto out;
+ locked = current->mm->locked_vm + grow;
lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
lock_limit >>= PAGE_SHIFT;
@@ -154,6 +157,7 @@ asmlinkage long sys_mlock(unsigned long
/* check against resource limits */
if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
error = do_mlock(start, len, 1);
+out:
up_write(&current->mm->mmap_sem);
return error;
}
@@ -213,6 +217,8 @@ asmlinkage long sys_mlockall(int flags)
lock_limit >>= PAGE_SHIFT;
ret = -ENOMEM;
+ if (!vx_vmlocked_avail(current->mm, current->mm->total_vm))
+ goto out;
if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
capable(CAP_IPC_LOCK))
ret = do_mlockall(flags);
--- a/mm/mmap.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/mmap.c 2008-04-19 15:14:52.000000000 -0400
@@ -1197,10 +1197,10 @@ munmap_back:
kmem_cache_free(vm_area_cachep, vma);
}
out:
- mm->total_vm += len >> PAGE_SHIFT;
+ vx_vmpages_add(mm, len >> PAGE_SHIFT);
vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
if (vm_flags & VM_LOCKED) {
- mm->locked_vm += len >> PAGE_SHIFT;
+ vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
if ((flags & MAP_POPULATE) && !(flags & MAP_NONBLOCK))
@@ -1549,9 +1549,9 @@ static int acct_stack_growth(struct vm_a
return -ENOMEM;
/* Ok, everything looks good - let it rip */
- mm->total_vm += grow;
+ vx_vmpages_add(mm, grow);
if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm += grow;
+ vx_vmlocked_add(mm, grow);
vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
return 0;
}
@@ -1722,9 +1722,9 @@ static void remove_vma_list(struct mm_st
do {
long nrpages = vma_pages(vma);
- mm->total_vm -= nrpages;
+ vx_vmpages_sub(mm, nrpages);
if (vma->vm_flags & VM_LOCKED)
- mm->locked_vm -= nrpages;
+ vx_vmlocked_sub(mm, nrpages);
vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
vma = remove_vma(vma);
} while (vma);
@@ -1967,6 +1967,8 @@ unsigned long do_brk(unsigned long addr,
lock_limit >>= PAGE_SHIFT;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
return -EAGAIN;
+ if (!vx_vmlocked_avail(mm, len >> PAGE_SHIFT))
+ return -ENOMEM;
}
/*
@@ -1993,7 +1995,8 @@ unsigned long do_brk(unsigned long addr,
if (mm->map_count > sysctl_max_map_count)
return -ENOMEM;
- if (security_vm_enough_memory(len >> PAGE_SHIFT))
+ if (security_vm_enough_memory(len >> PAGE_SHIFT) ||
+ !vx_vmpages_avail(mm, len >> PAGE_SHIFT))
return -ENOMEM;
/* Can we just expand an old private anonymous mapping? */
@@ -2018,9 +2021,9 @@ unsigned long do_brk(unsigned long addr,
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
- mm->total_vm += len >> PAGE_SHIFT;
+ vx_vmpages_add(mm, len >> PAGE_SHIFT);
if (flags & VM_LOCKED) {
- mm->locked_vm += len >> PAGE_SHIFT;
+ vx_vmlocked_add(mm, len >> PAGE_SHIFT);
make_pages_present(addr, addr + len);
}
return addr;
@@ -2049,6 +2052,11 @@ void exit_mmap(struct mm_struct *mm)
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
tlb_finish_mmu(tlb, 0, end);
+ set_mm_counter(mm, file_rss, 0);
+ set_mm_counter(mm, anon_rss, 0);
+ vx_vmpages_sub(mm, mm->total_vm);
+ vx_vmlocked_sub(mm, mm->locked_vm);
+
/*
* Walk the list again, actually closing and freeing it,
* with preemption enabled, without holding any MM locks.
@@ -2088,7 +2096,8 @@ int insert_vm_struct(struct mm_struct *
if (__vma && __vma->vm_start < vma->vm_end)
return -ENOMEM;
if ((vma->vm_flags & VM_ACCOUNT) &&
- security_vm_enough_memory_mm(mm, vma_pages(vma)))
+ (security_vm_enough_memory_mm(mm, vma_pages(vma)) ||
+ !vx_vmpages_avail(mm, vma_pages(vma))))
return -ENOMEM;
vma_link(mm, vma, prev, rb_link, rb_parent);
return 0;
@@ -2161,6 +2170,8 @@ int may_expand_vm(struct mm_struct *mm,
if (cur + npages > lim)
return 0;
+ if (!vx_vmpages_avail(mm, npages))
+ return 0;
return 1;
}
@@ -2238,7 +2249,6 @@ int install_special_mapping(struct mm_st
return -ENOMEM;
}
- mm->total_vm += len >> PAGE_SHIFT;
-
+ vx_vmpages_add(mm, len >> PAGE_SHIFT);
return 0;
}
--- a/mm/mremap.c 2008-04-17 11:31:40.000000000 -0400
+++ a/mm/mremap.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,7 @@
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
+#include <linux/vs_memory.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -212,7 +213,7 @@ static unsigned long move_vma(struct vm_
* If this were a serious issue, we'd add a flag to do_munmap().
*/
hiwater_vm = mm->hiwater_vm;
- mm->total_vm += new_len >> PAGE_SHIFT;
+ vx_vmpages_add(mm, new_len >> PAGE_SHIFT);
vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len>>PAGE_SHIFT);
if (do_munmap(mm, old_addr, old_len) < 0) {
@@ -230,7 +231,7 @@ static unsigned long move_vma(struct vm_
}
if (vm_flags & VM_LOCKED) {
- mm->locked_vm += new_len >> PAGE_SHIFT;
+ vx_vmlocked_add(mm, new_len >> PAGE_SHIFT);
if (new_len > old_len)
make_pages_present(new_addr + old_len,
new_addr + new_len);
@@ -341,6 +342,9 @@ unsigned long do_mremap(unsigned long ad
ret = -EAGAIN;
if (locked > lock_limit && !capable(CAP_IPC_LOCK))
goto out;
+ if (!vx_vmlocked_avail(current->mm,
+ (new_len - old_len) >> PAGE_SHIFT))
+ goto out;
}
if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT)) {
ret = -ENOMEM;
@@ -369,10 +373,10 @@ unsigned long do_mremap(unsigned long ad
vma_adjust(vma, vma->vm_start,
addr + new_len, vma->vm_pgoff, NULL);
- mm->total_vm += pages;
+ vx_vmpages_add(mm, pages);
vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
if (vma->vm_flags & VM_LOCKED) {
- mm->locked_vm += pages;
+ vx_vmlocked_add(mm, pages);
make_pages_present(addr + old_len,
addr + new_len);
}
--- a/mm/nommu.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/nommu.c 2008-04-19 15:14:52.000000000 -0400
@@ -991,7 +991,7 @@ unsigned long do_mmap_pgoff(struct file
realalloc += kobjsize(vma);
askedalloc += sizeof(*vma);
- current->mm->total_vm += len >> PAGE_SHIFT;
+ vx_vmpages_add(current->mm, len >> PAGE_SHIFT);
add_nommu_vma(vma);
@@ -1117,7 +1117,7 @@ int do_munmap(struct mm_struct *mm, unsi
kfree(vml);
update_hiwater_vm(mm);
- mm->total_vm -= len >> PAGE_SHIFT;
+ vx_vmpages_sub(mm, len >> PAGE_SHIFT);
#ifdef DEBUG
show_process_blocks();
@@ -1150,7 +1150,7 @@ void exit_mmap(struct mm_struct * mm)
printk("Exit_mmap:\n");
#endif
- mm->total_vm = 0;
+ vx_vmpages_sub(mm, mm->total_vm);
while ((tmp = mm->context.vmlist)) {
mm->context.vmlist = tmp->next;
--- a/mm/oom_kill.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/oom_kill.c 2008-04-19 16:55:20.000000000 -0400
@@ -26,6 +26,7 @@
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/memcontrol.h>
+#include <linux/vs_memory.h>
int sysctl_panic_on_oom;
int sysctl_oom_kill_allocating_task;
@@ -73,6 +74,12 @@ unsigned long badness(struct task_struct
points = mm->total_vm;
/*
+ * add points for context badness
+ */
+
+ points += vx_badness(p, mm);
+
+ /*
* After this unlock we can no longer dereference local variable `mm'
*/
task_unlock(p);
@@ -162,8 +169,8 @@ unsigned long badness(struct task_struct
}
#ifdef DEBUG
- printk(KERN_DEBUG "OOMkill: task %d (%s) got %lu points\n",
- p->pid, p->comm, points);
+ printk(KERN_DEBUG "OOMkill: task %d:#%u (%s) got %d points\n",
+ task_pid_nr(p), p->xid, p->comm, points);
#endif
return points;
}
@@ -322,8 +329,8 @@ static void __oom_kill_task(struct task_
}
if (verbose)
- printk(KERN_ERR "Killed process %d (%s)\n",
- task_pid_nr(p), p->comm);
+ printk(KERN_ERR "Killed process %d:#%u (%s)\n",
+ task_pid_nr(p), p->xid, p->comm);
/*
* We give our sacrificial lamb high priority and access to
@@ -403,8 +410,8 @@ static int oom_kill_process(struct task_
return 0;
}
- printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
- message, task_pid_nr(p), p->comm, points);
+ printk(KERN_ERR "%s: kill process %d:#%u (%s) score %li or a child\n",
+ message, task_pid_nr(p), p->xid, p->comm, points);
/* Try to kill a child first */
list_for_each_entry(c, &p->children, sibling) {
--- a/mm/page_alloc.c 2008-05-21 14:30:05.000000000 -0400
+++ a/mm/page_alloc.c 2008-05-21 14:30:41.000000000 -0400
@@ -45,6 +45,8 @@
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/memcontrol.h>
+#include <linux/vs_base.h>
+#include <linux/vs_limit.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -1762,6 +1764,9 @@ void si_meminfo(struct sysinfo *val)
val->totalhigh = totalhigh_pages;
val->freehigh = nr_free_highpages();
val->mem_unit = PAGE_SIZE;
+
+ if (vx_flags(VXF_VIRT_MEM, 0))
+ vx_vsi_meminfo(val);
}
EXPORT_SYMBOL(si_meminfo);
@@ -1782,6 +1787,9 @@ void si_meminfo_node(struct sysinfo *val
val->freehigh = 0;
#endif
val->mem_unit = PAGE_SIZE;
+
+ if (vx_flags(VXF_VIRT_MEM, 0))
+ vx_vsi_meminfo(val);
}
#endif
--- a/mm/rmap.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/rmap.c 2008-04-19 16:53:36.000000000 -0400
@@ -49,6 +49,7 @@
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/memcontrol.h>
+#include <linux/vs_memory.h>
#include <asm/tlbflush.h>
--- a/mm/shmem.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/shmem.c 2008-04-19 15:14:52.000000000 -0400
@@ -56,7 +56,6 @@
#include <asm/pgtable.h>
/* This magic number is used in glibc for posix shared memory */
-#define TMPFS_MAGIC 0x01021994
#define ENTRIES_PER_PAGE (PAGE_CACHE_SIZE/sizeof(unsigned long))
#define ENTRIES_PER_PAGEPAGE (ENTRIES_PER_PAGE*ENTRIES_PER_PAGE)
@@ -1773,7 +1772,7 @@ static int shmem_statfs(struct dentry *d
{
struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
- buf->f_type = TMPFS_MAGIC;
+ buf->f_type = TMPFS_SUPER_MAGIC;
buf->f_bsize = PAGE_CACHE_SIZE;
buf->f_namelen = NAME_MAX;
spin_lock(&sbinfo->stat_lock);
@@ -2341,7 +2340,7 @@ static int shmem_fill_super(struct super
sb->s_maxbytes = SHMEM_MAX_BYTES;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
- sb->s_magic = TMPFS_MAGIC;
+ sb->s_magic = TMPFS_SUPER_MAGIC;
sb->s_op = &shmem_ops;
sb->s_time_gran = 1;
#ifdef CONFIG_TMPFS_POSIX_ACL
--- a/mm/slab.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/slab.c 2008-04-19 15:14:52.000000000 -0400
@@ -509,6 +509,8 @@ struct kmem_cache {
#define STATS_INC_FREEMISS(x) do { } while (0)
#endif
+#include "slab_vs.h"
+
#if DEBUG
/*
@@ -3344,6 +3346,7 @@ retry:
obj = slab_get_obj(cachep, slabp, nodeid);
check_slabp(cachep, slabp);
+ vx_slab_alloc(cachep, flags);
l3->free_objects--;
/* move slabp to correct slabp list: */
list_del(&slabp->list);
@@ -3416,6 +3419,7 @@ __cache_alloc_node(struct kmem_cache *ca
/* ___cache_alloc_node can fall back to other nodes */
ptr = ____cache_alloc_node(cachep, flags, nodeid);
out:
+ vx_slab_alloc(cachep, flags);
local_irq_restore(save_flags);
ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
@@ -3587,6 +3591,7 @@ static inline void __cache_free(struct k
check_irq_off();
objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
+ vx_slab_free(cachep);
/*
* Skip calling cache_free_alien() when the platform is not numa.
--- a/mm/slab_vs.h 1969-12-31 19:00:00.000000000 -0500
+++ a/mm/slab_vs.h 2008-04-19 15:14:52.000000000 -0400
@@ -0,0 +1,27 @@
+
+#include <linux/vserver/context.h>
+
+#include <linux/vs_context.h>
+
+static inline
+void vx_slab_alloc(struct kmem_cache *cachep, gfp_t flags)
+{
+ int what = gfp_zone(cachep->gfpflags);
+
+ if (!current->vx_info)
+ return;
+
+ atomic_add(cachep->buffer_size, &current->vx_info->cacct.slab[what]);
+}
+
+static inline
+void vx_slab_free(struct kmem_cache *cachep)
+{
+ int what = gfp_zone(cachep->gfpflags);
+
+ if (!current->vx_info)
+ return;
+
+ atomic_sub(cachep->buffer_size, &current->vx_info->cacct.slab[what]);
+}
+
--- a/mm/swapfile.c 2008-04-17 12:05:44.000000000 -0400
+++ a/mm/swapfile.c 2008-04-19 15:14:52.000000000 -0400
@@ -32,6 +32,8 @@
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
+#include <linux/vs_base.h>
+#include <linux/vs_memory.h>
DEFINE_SPINLOCK(swap_lock);
unsigned int nr_swapfiles;
@@ -1743,6 +1745,8 @@ void si_swapinfo(struct sysinfo *val)
val->freeswap = nr_swap_pages + nr_to_be_unused;
val->totalswap = total_swap_pages + nr_to_be_unused;
spin_unlock(&swap_lock);
+ if (vx_flags(VXF_VIRT_MEM, 0))
+ vx_vsi_swapinfo(val);
}
/*
--- a/net/core/dev.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/core/dev.c 2008-04-19 15:14:52.000000000 -0400
@@ -119,6 +119,7 @@
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/if_arp.h>
+#include <linux/vs_inet.h>
#include "net-sysfs.h"
@@ -2336,6 +2337,8 @@ static int dev_ifconf(struct net *net, c
total = 0;
for_each_netdev(net, dev) {
+ if (!nx_dev_visible(current->nx_info, dev))
+ continue;
for (i = 0; i < NPROTO; i++) {
if (gifconf_list[i]) {
int done;
@@ -2404,6 +2407,9 @@ static void dev_seq_printf_stats(struct
{
struct net_device_stats *stats = dev->get_stats(dev);
+ if (!nx_dev_visible(current->nx_info, dev))
+ return;
+
seq_printf(seq, "%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
dev->name, stats->rx_bytes, stats->rx_packets,
--- a/net/core/rtnetlink.c 2008-05-21 14:30:05.000000000 -0400
+++ a/net/core/rtnetlink.c 2008-05-21 14:30:41.000000000 -0400
@@ -674,6 +674,8 @@ static int rtnl_dump_ifinfo(struct sk_bu
idx = 0;
for_each_netdev(net, dev) {
+ if (!nx_dev_visible(skb->sk->sk_nx_info, dev))
+ continue;
if (idx < s_idx)
goto cont;
if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
@@ -1207,6 +1209,9 @@ void rtmsg_ifinfo(int type, struct net_d
struct sk_buff *skb;
int err = -ENOBUFS;
+ if (!nx_dev_visible(current->nx_info, dev))
+ return;
+
skb = nlmsg_new(if_nlmsg_size(dev), GFP_KERNEL);
if (skb == NULL)
goto errout;
--- a/net/core/sock.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/core/sock.c 2008-04-23 14:31:31.000000000 -0400
@@ -126,6 +126,10 @@
#include <linux/ipsec.h>
#include <linux/filter.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
#ifdef CONFIG_INET
#include <net/tcp.h>
@@ -907,6 +911,8 @@ static struct sock *sk_prot_alloc(struct
if (!try_module_get(prot->owner))
goto out_free_sec;
}
+ sock_vx_init(sk);
+ sock_nx_init(sk);
return sk;
@@ -984,6 +990,11 @@ void sk_free(struct sock *sk)
__FUNCTION__, atomic_read(&sk->sk_omem_alloc));
put_net(sk->sk_net);
+ vx_sock_dec(sk);
+ clr_vx_info(&sk->sk_vx_info);
+ sk->sk_xid = -1;
+ clr_nx_info(&sk->sk_nx_info);
+ sk->sk_nid = -1;
sk_prot_free(sk->sk_prot_creator, sk);
}
@@ -999,6 +1010,8 @@ struct sock *sk_clone(const struct sock
/* SANITY */
get_net(newsk->sk_net);
+ sock_vx_init(newsk);
+ sock_nx_init(newsk);
sk_node_init(&newsk->sk_node);
sock_lock_init(newsk);
bh_lock_sock(newsk);
@@ -1045,6 +1058,12 @@ struct sock *sk_clone(const struct sock
newsk->sk_priority = 0;
atomic_set(&newsk->sk_refcnt, 2);
+ set_vx_info(&newsk->sk_vx_info, sk->sk_vx_info);
+ newsk->sk_xid = sk->sk_xid;
+ vx_sock_inc(newsk);
+ set_nx_info(&newsk->sk_nx_info, sk->sk_nx_info);
+ newsk->sk_nid = sk->sk_nid;
+
/*
* Increment the counter in the same struct proto as the master
* sock (sk_refcnt_debug_inc uses newsk->sk_prot->socks, that
@@ -1727,6 +1746,11 @@ void sock_init_data(struct socket *sock,
sk->sk_stamp = ktime_set(-1L, 0);
+ set_vx_info(&sk->sk_vx_info, current->vx_info);
+ sk->sk_xid = vx_current_xid();
+ vx_sock_inc(sk);
+ set_nx_info(&sk->sk_nx_info, current->nx_info);
+ sk->sk_nid = nx_current_nid();
atomic_set(&sk->sk_refcnt, 1);
atomic_set(&sk->sk_drops, 0);
}
--- a/net/ipv4/af_inet.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/af_inet.c 2008-04-19 16:08:42.000000000 -0400
@@ -115,6 +115,7 @@
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
+#include <linux/vs_limit.h>
DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly;
@@ -317,9 +318,12 @@ lookup_protocol:
}
err = -EPERM;
+ if ((protocol == IPPROTO_ICMP) &&
+ nx_capable(answer->capability, NXC_RAW_ICMP))
+ goto override;
if (answer->capability > 0 && !capable(answer->capability))
goto out_rcu_unlock;
-
+override:
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_no_check = answer->no_check;
@@ -433,6 +437,7 @@ int inet_bind(struct socket *sock, struc
struct sockaddr_in *addr = (struct sockaddr_in *)uaddr;
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
+ struct nx_v4_sock_addr nsa;
unsigned short snum;
int chk_addr_ret;
int err;
@@ -446,7 +451,11 @@ int inet_bind(struct socket *sock, struc
if (addr_len < sizeof(struct sockaddr_in))
goto out;
- chk_addr_ret = inet_addr_type(&init_net, addr->sin_addr.s_addr);
+ err = v4_map_sock_addr(inet, addr, &nsa);
+ if (err)
+ goto out;
+
+ chk_addr_ret = inet_addr_type(&init_net, nsa.saddr);
/* Not specified by any standard per-se, however it breaks too
* many applications when removed. It is unfortunate since
@@ -458,7 +467,7 @@ int inet_bind(struct socket *sock, struc
err = -EADDRNOTAVAIL;
if (!sysctl_ip_nonlocal_bind &&
!inet->freebind &&
- addr->sin_addr.s_addr != htonl(INADDR_ANY) &&
+ nsa.saddr != htonl(INADDR_ANY) &&
chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST &&
chk_addr_ret != RTN_BROADCAST)
@@ -483,7 +492,7 @@ int inet_bind(struct socket *sock, struc
if (sk->sk_state != TCP_CLOSE || inet->num)
goto out_release_sock;
- inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+ v4_set_sock_addr(inet, &nsa);
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->saddr = 0; /* Use device */
@@ -676,11 +685,13 @@ int inet_getname(struct socket *sock, st
peer == 1))
return -ENOTCONN;
sin->sin_port = inet->dport;
- sin->sin_addr.s_addr = inet->daddr;
+ sin->sin_addr.s_addr =
+ nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
} else {
__be32 addr = inet->rcv_saddr;
if (!addr)
addr = inet->saddr;
+ addr = nx_map_sock_lback(sk->sk_nx_info, addr);
sin->sin_port = inet->sport;
sin->sin_addr.s_addr = addr;
}
--- a/net/ipv4/devinet.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/devinet.c 2008-04-19 15:14:52.000000000 -0400
@@ -421,6 +421,7 @@ struct in_device *inetdev_by_index(struc
return in_dev;
}
+
/* Called only from RTNL semaphored context. No locks. */
struct in_ifaddr *inet_ifa_byprefix(struct in_device *in_dev, __be32 prefix,
@@ -672,6 +673,8 @@ int devinet_ioctl(unsigned int cmd, void
*colon = ':';
if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
+ struct nx_info *nxi = current->nx_info;
+
if (tryaddrmatch) {
/* Matthias Andree */
/* compare label and address (4.4BSD style) */
@@ -680,6 +683,8 @@ int devinet_ioctl(unsigned int cmd, void
This is checked above. */
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
ifap = &ifa->ifa_next) {
+ if (!nx_v4_ifa_visible(nxi, ifa))
+ continue;
if (!strcmp(ifr.ifr_name, ifa->ifa_label) &&
sin_orig.sin_addr.s_addr ==
ifa->ifa_address) {
@@ -692,9 +697,12 @@ int devinet_ioctl(unsigned int cmd, void
comparing just the label */
if (!ifa) {
for (ifap = &in_dev->ifa_list; (ifa = *ifap) != NULL;
- ifap = &ifa->ifa_next)
+ ifap = &ifa->ifa_next) {
+ if (!nx_v4_ifa_visible(nxi, ifa))
+ continue;
if (!strcmp(ifr.ifr_name, ifa->ifa_label))
break;
+ }
}
}
@@ -846,6 +854,8 @@ static int inet_gifconf(struct net_devic
goto out;
for (; ifa; ifa = ifa->ifa_next) {
+ if (!nx_v4_ifa_visible(current->nx_info, ifa))
+ continue;
if (!buf) {
done += sizeof(ifr);
continue;
@@ -1171,6 +1181,7 @@ static int inet_dump_ifaddr(struct sk_bu
struct net_device *dev;
struct in_device *in_dev;
struct in_ifaddr *ifa;
+ struct sock *sk = skb->sk;
int s_ip_idx, s_idx = cb->args[0];
if (net != &init_net)
@@ -1188,6 +1199,8 @@ static int inet_dump_ifaddr(struct sk_bu
for (ifa = in_dev->ifa_list, ip_idx = 0; ifa;
ifa = ifa->ifa_next, ip_idx++) {
+ if (sk && !nx_v4_ifa_visible(sk->sk_nx_info, ifa))
+ continue;
if (ip_idx < s_ip_idx)
continue;
if (inet_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
--- a/net/ipv4/fib_hash.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/fib_hash.c 2008-04-19 15:14:52.000000000 -0400
@@ -1025,7 +1025,7 @@ static int fib_seq_show(struct seq_file
prefix = f->fn_key;
mask = FZ_MASK(iter->zone);
flags = fib_flag_trans(fa->fa_type, mask, fi);
- if (fi)
+ if (fi && nx_dev_visible(current->nx_info, fi->fib_dev))
snprintf(bf, sizeof(bf),
"%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u",
fi->fib_dev ? fi->fib_dev->name : "*", prefix,
--- a/net/ipv4/inet_connection_sock.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/inet_connection_sock.c 2008-04-19 15:14:52.000000000 -0400
@@ -47,10 +47,40 @@ void inet_get_local_port_range(int *low,
}
EXPORT_SYMBOL(inet_get_local_port_range);
+int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
+{
+ __be32 sk1_rcv_saddr = inet_rcv_saddr(sk1),
+ sk2_rcv_saddr = inet_rcv_saddr(sk2);
+
+ if (inet_v6_ipv6only(sk2))
+ return 0;
+
+ if (sk1_rcv_saddr &&
+ sk2_rcv_saddr &&
+ sk1_rcv_saddr == sk2_rcv_saddr)
+ return 1;
+
+ if (sk1_rcv_saddr &&
+ !sk2_rcv_saddr &&
+ v4_addr_in_nx_info(sk2->sk_nx_info, sk1_rcv_saddr, NXA_MASK_BIND))
+ return 1;
+
+ if (sk2_rcv_saddr &&
+ !sk1_rcv_saddr &&
+ v4_addr_in_nx_info(sk1->sk_nx_info, sk2_rcv_saddr, NXA_MASK_BIND))
+ return 1;
+
+ if (!sk1_rcv_saddr &&
+ !sk2_rcv_saddr &&
+ nx_v4_addr_conflict(sk1->sk_nx_info, sk2->sk_nx_info))
+ return 1;
+
+ return 0;
+}
+
int inet_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
{
- const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
struct sock *sk2;
struct hlist_node *node;
int reuse = sk->sk_reuse;
@@ -63,9 +93,7 @@ int inet_csk_bind_conflict(const struct
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
if (!reuse || !sk2->sk_reuse ||
sk2->sk_state == TCP_LISTEN) {
- const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
- if (!sk2_rcv_saddr || !sk_rcv_saddr ||
- sk2_rcv_saddr == sk_rcv_saddr)
+ if (ipv4_rcv_saddr_equal(sk, sk2))
break;
}
}
--- a/net/ipv4/inet_diag.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/inet_diag.c 2008-04-19 16:05:51.000000000 -0400
@@ -34,6 +34,8 @@
#include <linux/stddef.h>
#include <linux/inet_diag.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet.h>
static const struct inet_diag_handler **inet_diag_table;
@@ -122,8 +124,8 @@ static int inet_csk_diag_fill(struct soc
r->id.idiag_sport = inet->sport;
r->id.idiag_dport = inet->dport;
- r->id.idiag_src[0] = inet->rcv_saddr;
- r->id.idiag_dst[0] = inet->daddr;
+ r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, inet->rcv_saddr);
+ r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, inet->daddr);
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
if (r->idiag_family == AF_INET6) {
@@ -210,8 +212,8 @@ static int inet_twsk_diag_fill(struct in
r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
- r->id.idiag_src[0] = tw->tw_rcv_saddr;
- r->id.idiag_dst[0] = tw->tw_daddr;
+ r->id.idiag_src[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_rcv_saddr);
+ r->id.idiag_dst[0] = nx_map_sock_lback(tw->tw_nx_info, tw->tw_daddr);
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ);
@@ -268,6 +270,7 @@ static int inet_diag_get_exact(struct sk
err = -EINVAL;
if (req->idiag_family == AF_INET) {
+ /* TODO: lback */
sk = inet_lookup(&init_net, hashinfo, req->id.idiag_dst[0],
req->id.idiag_dport, req->id.idiag_src[0],
req->id.idiag_sport, req->id.idiag_if);
@@ -510,6 +513,7 @@ static int inet_csk_diag_dump(struct soc
} else
#endif
{
+ /* TODO: lback */
entry.saddr = &inet->rcv_saddr;
entry.daddr = &inet->daddr;
}
@@ -546,6 +550,7 @@ static int inet_twsk_diag_dump(struct in
} else
#endif
{
+ /* TODO: lback */
entry.saddr = &tw->tw_rcv_saddr;
entry.daddr = &tw->tw_daddr;
}
@@ -592,8 +597,8 @@ static int inet_diag_fill_req(struct sk_
r->id.idiag_sport = inet->sport;
r->id.idiag_dport = ireq->rmt_port;
- r->id.idiag_src[0] = ireq->loc_addr;
- r->id.idiag_dst[0] = ireq->rmt_addr;
+ r->id.idiag_src[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->loc_addr);
+ r->id.idiag_dst[0] = nx_map_sock_lback(sk->sk_nx_info, ireq->rmt_addr);
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
@@ -663,6 +668,7 @@ static int inet_diag_dump_reqs(struct sk
continue;
if (bc) {
+ /* TODO: lback */
entry.saddr =
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
(entry.family == AF_INET6) ?
@@ -731,6 +737,8 @@ static int inet_diag_dump(struct sk_buff
sk_for_each(sk, node, &hashinfo->listening_hash[i]) {
struct inet_sock *inet = inet_sk(sk);
+ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (num < s_num) {
num++;
continue;
@@ -792,6 +800,8 @@ skip_listen_ht:
sk_for_each(sk, node, &head->chain) {
struct inet_sock *inet = inet_sk(sk);
+ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (num < s_num)
goto next_normal;
if (!(r->idiag_states & (1 << sk->sk_state)))
@@ -816,6 +826,8 @@ next_normal:
inet_twsk_for_each(tw, node,
&head->twchain) {
+ if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (num < s_num)
goto next_dying;
if (r->id.idiag_sport != tw->tw_sport &&
--- a/net/ipv4/inet_hashtables.c 2008-04-17 12:05:44.000000000 -0400
+++ a/net/ipv4/inet_hashtables.c 2008-04-19 15:14:52.000000000 -0400
@@ -21,6 +21,7 @@
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
+#include <net/route.h>
#include <net/ip.h>
/*
@@ -144,11 +145,10 @@ static struct sock *inet_lookup_listener
const __be32 rcv_saddr = inet->rcv_saddr;
int score = sk->sk_family == PF_INET ? 1 : 0;
- if (rcv_saddr) {
- if (rcv_saddr != daddr)
- continue;
+ if (v4_inet_addr_match(sk->sk_nx_info, daddr, rcv_saddr))
score += 2;
- }
+ else
+ continue;
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
continue;
@@ -180,7 +180,7 @@ struct sock *__inet_lookup_listener(stru
const struct inet_sock *inet = inet_sk((sk = __sk_head(head)));
if (inet->num == hnum && !sk->sk_node.next &&
- (!inet->rcv_saddr || inet->rcv_saddr == daddr) &&
+ v4_inet_addr_match(sk->sk_nx_info, daddr, inet->rcv_saddr) &&
(sk->sk_family == PF_INET || !ipv6_only_sock(sk)) &&
!sk->sk_bound_dev_if && sk->sk_net == net)
goto sherry_cache;
--- a/net/ipv4/netfilter/nf_nat_helper.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/netfilter/nf_nat_helper.c 2008-04-19 15:14:52.000000000 -0400
@@ -18,6 +18,7 @@
#include <net/tcp.h>
#include <linux/netfilter_ipv4.h>
+#include <net/route.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_ecache.h>
--- a/net/ipv4/netfilter.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/netfilter.c 2008-04-19 15:14:52.000000000 -0400
@@ -4,7 +4,7 @@
#include <linux/netfilter_ipv4.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
-#include <net/route.h>
+// #include <net/route.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netfilter/nf_queue.h>
--- a/net/ipv4/raw.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/raw.c 2008-04-29 20:28:52.000000000 -0400
@@ -126,7 +126,7 @@ static struct sock *__raw_v4_lookup(stru
if (sk->sk_net == net && inet->num == num &&
!(inet->daddr && inet->daddr != raddr) &&
- !(inet->rcv_saddr && inet->rcv_saddr != laddr) &&
+ v4_sock_addr_match(sk->sk_nx_info, inet, laddr) &&
!(sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
goto found; /* gotcha */
}
@@ -382,6 +382,12 @@ static int raw_send_hdrinc(struct sock *
icmp_out_count(((struct icmphdr *)
skb_transport_header(skb))->type);
+ err = -EPERM;
+ if (!nx_check(0, VS_ADMIN) && !capable(CAP_NET_RAW) &&
+ sk->sk_nx_info &&
+ !v4_addr_in_nx_info(sk->sk_nx_info, iph->saddr, NXA_MASK_BIND))
+ goto error_free;
+
err = NF_HOOK(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
dst_output);
if (err > 0)
@@ -393,6 +399,7 @@ out:
error_fault:
err = -EFAULT;
+error_free:
kfree_skb(skb);
error:
IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
@@ -560,6 +567,13 @@ static int raw_sendmsg(struct kiocb *ioc
}
security_sk_classify_flow(sk, &fl);
+ if (sk->sk_nx_info) {
+ err = ip_v4_find_src(sk->sk_net,
+ sk->sk_nx_info, &rt, &fl);
+
+ if (err)
+ goto done;
+ }
err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1);
}
if (err)
@@ -622,17 +636,19 @@ static int raw_bind(struct sock *sk, str
{
struct inet_sock *inet = inet_sk(sk);
struct sockaddr_in *addr = (struct sockaddr_in *) uaddr;
+ struct nx_v4_sock_addr nsa = { 0 };
int ret = -EINVAL;
int chk_addr_ret;
if (sk->sk_state != TCP_CLOSE || addr_len < sizeof(struct sockaddr_in))
goto out;
- chk_addr_ret = inet_addr_type(sk->sk_net, addr->sin_addr.s_addr);
+ v4_map_sock_addr(inet, addr, &nsa);
+ chk_addr_ret = inet_addr_type(sk->sk_net, nsa.saddr);
ret = -EADDRNOTAVAIL;
- if (addr->sin_addr.s_addr && chk_addr_ret != RTN_LOCAL &&
+ if (nsa.saddr && chk_addr_ret != RTN_LOCAL &&
chk_addr_ret != RTN_MULTICAST && chk_addr_ret != RTN_BROADCAST)
goto out;
- inet->rcv_saddr = inet->saddr = addr->sin_addr.s_addr;
+ v4_set_sock_addr(inet, &nsa);
if (chk_addr_ret == RTN_MULTICAST || chk_addr_ret == RTN_BROADCAST)
inet->saddr = 0; /* Use device */
sk_dst_reset(sk);
@@ -684,7 +700,8 @@ static int raw_recvmsg(struct kiocb *ioc
/* Copy the address. */
if (sin) {
sin->sin_family = AF_INET;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_addr.s_addr =
+ nx_map_sock_lback(sk->sk_nx_info, ip_hdr(skb)->saddr);
sin->sin_port = 0;
memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
}
@@ -862,7 +879,8 @@ static struct sock *raw_get_first(struct
struct hlist_node *node;
sk_for_each(sk, node, &state->h->ht[state->bucket])
- if (sk->sk_net == state->p.net)
+ if ((sk->sk_net == state->p.net) &&
+ nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
goto found;
}
sk = NULL;
@@ -878,7 +896,8 @@ static struct sock *raw_get_next(struct
sk = sk_next(sk);
try_again:
;
- } while (sk && sk->sk_net != state->p.net);
+ } while (sk && ((sk->sk_net != state->p.net) ||
+ !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
if (!sk && ++state->bucket < RAW_HTABLE_SIZE) {
sk = sk_head(&state->h->ht[state->bucket]);
@@ -937,7 +956,10 @@ static void raw_sock_seq_show(struct seq
seq_printf(seq, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p %d",
- i, src, srcp, dest, destp, sp->sk_state,
+ i,
+ nx_map_sock_lback(current_nx_info(), src), srcp,
+ nx_map_sock_lback(current_nx_info(), dest), destp,
+ sp->sk_state,
atomic_read(&sp->sk_wmem_alloc),
atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
--- a/net/ipv4/syncookies.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/syncookies.c 2008-04-19 15:14:52.000000000 -0400
@@ -20,6 +20,7 @@
#include <linux/cryptohash.h>
#include <linux/kernel.h>
#include <net/tcp.h>
+#include <net/route.h>
extern int sysctl_tcp_syncookies;
--- a/net/ipv4/tcp.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/tcp.c 2008-04-19 15:14:52.000000000 -0400
@@ -263,6 +263,7 @@
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
+#include <linux/in.h>
#include <net/icmp.h>
#include <net/tcp.h>
--- a/net/ipv4/tcp_ipv4.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/tcp_ipv4.c 2008-04-19 15:14:52.000000000 -0400
@@ -1965,6 +1965,12 @@ static void *listening_get_next(struct s
req = req->dl_next;
while (1) {
while (req) {
+ vxdprintk(VXD_CBIT(net, 6),
+ "sk,req: %p [#%d] (from %d)", req->sk,
+ (req->sk)?req->sk->sk_nid:0, nx_current_nid());
+ if (req->sk &&
+ !nx_check(req->sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (req->rsk_ops->family == st->family) {
cur = req;
goto out;
@@ -1989,6 +1995,10 @@ get_req:
}
get_sk:
sk_for_each_from(sk, node) {
+ vxdprintk(VXD_CBIT(net, 6), "sk: %p [#%d] (from %d)",
+ sk, sk->sk_nid, nx_current_nid());
+ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (sk->sk_family == st->family) {
cur = sk;
goto out;
@@ -2038,18 +2048,26 @@ static void *established_get_first(struc
read_lock_bh(lock);
sk_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
- if (sk->sk_family != st->family) {
+ vxdprintk(VXD_CBIT(net, 6),
+ "sk,egf: %p [#%d] (from %d)",
+ sk, sk->sk_nid, nx_current_nid());
+ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
+ if (sk->sk_family != st->family)
continue;
- }
rc = sk;
goto out;
}
st->state = TCP_SEQ_STATE_TIME_WAIT;
inet_twsk_for_each(tw, node,
&tcp_hashinfo.ehash[st->bucket].twchain) {
- if (tw->tw_family != st->family) {
+ vxdprintk(VXD_CBIT(net, 6),
+ "tw: %p [#%d] (from %d)",
+ tw, tw->tw_nid, nx_current_nid());
+ if (!nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))
+ continue;
+ if (tw->tw_family != st->family)
continue;
- }
rc = tw;
goto out;
}
@@ -2073,7 +2091,8 @@ static void *established_get_next(struct
tw = cur;
tw = tw_next(tw);
get_tw:
- while (tw && tw->tw_family != st->family) {
+ while (tw && (tw->tw_family != st->family ||
+ !nx_check(tw->tw_nid, VS_WATCH_P | VS_IDENT))) {
tw = tw_next(tw);
}
if (tw) {
@@ -2094,6 +2113,11 @@ get_tw:
sk = sk_next(sk);
sk_for_each_from(sk, node) {
+ vxdprintk(VXD_CBIT(net, 6),
+ "sk,egn: %p [#%d] (from %d)",
+ sk, sk->sk_nid, nx_current_nid());
+ if (!nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (sk->sk_family == st->family)
goto found;
}
@@ -2266,9 +2290,9 @@ static void get_openreq4(struct sock *sk
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p",
i,
- ireq->loc_addr,
+ nx_map_sock_lback(current_nx_info(), ireq->loc_addr),
ntohs(inet_sk(sk)->sport),
- ireq->rmt_addr,
+ nx_map_sock_lback(current_nx_info(), ireq->rmt_addr),
ntohs(ireq->rmt_port),
TCP_SYN_RECV,
0, 0, /* could print option size, but that is af dependent. */
@@ -2310,7 +2334,10 @@ static void get_tcp4_sock(struct sock *s
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5d %8d %lu %d %p %u %u %u %u %d",
- i, src, srcp, dest, destp, sk->sk_state,
+ i,
+ nx_map_sock_lback(current_nx_info(), src), srcp,
+ nx_map_sock_lback(current_nx_info(), dest), destp,
+ sk->sk_state,
tp->write_seq - tp->snd_una,
sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
(tp->rcv_nxt - tp->copied_seq),
@@ -2345,7 +2372,10 @@ static void get_timewait4_sock(struct in
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p",
- i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
+ i,
+ nx_map_sock_lback(current_nx_info(), src), srcp,
+ nx_map_sock_lback(current_nx_info(), dest), destp,
+ tw->tw_substate, 0, 0,
3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
atomic_read(&tw->tw_refcnt), tw);
}
--- a/net/ipv4/tcp_minisocks.c 2008-04-17 11:31:40.000000000 -0400
+++ a/net/ipv4/tcp_minisocks.c 2008-04-19 15:14:52.000000000 -0400
@@ -28,6 +28,10 @@
#include <net/inet_common.h>
#include <net/xfrm.h>
+#include <linux/vs_limit.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_context.h>
+
#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
@@ -293,6 +297,11 @@ void tcp_time_wait(struct sock *sk, int
tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
+ tw->tw_xid = sk->sk_xid;
+ tw->tw_vx_info = NULL;
+ tw->tw_nid = sk->sk_nid;
+ tw->tw_nx_info = NULL;
+
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
if (tw->tw_family == PF_INET6) {
struct ipv6_pinfo *np = inet6_sk(sk);
--- a/net/ipv4/udp.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv4/udp.c 2008-04-21 12:41:01.000000000 -0400
@@ -246,14 +246,7 @@ int udp_get_port(struct sock *sk, unsign
return __udp_lib_get_port(sk, snum, udp_hash, scmp);
}
-int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2)
-{
- struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
-
- return ( !ipv6_only_sock(sk2) &&
- (!inet1->rcv_saddr || !inet2->rcv_saddr ||
- inet1->rcv_saddr == inet2->rcv_saddr ));
-}
+extern int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2);
static inline int udp_v4_get_port(struct sock *sk, unsigned short snum)
{
@@ -273,16 +266,23 @@ static struct sock *__udp4_lib_lookup(st
int badness = -1;
read_lock(&udp_hash_lock);
+
sk_for_each(sk, node, &udptable[hnum & (UDP_HTABLE_SIZE - 1)]) {
struct inet_sock *inet = inet_sk(sk);
if (sk->sk_net == net && sk->sk_hash == hnum &&
!ipv6_only_sock(sk)) {
int score = (sk->sk_family == PF_INET ? 1 : 0);
+
if (inet->rcv_saddr) {
if (inet->rcv_saddr != daddr)
continue;
score+=2;
+ } else {
+ /* block non nx_info ips */
+ if (!v4_addr_in_nx_info(sk->sk_nx_info,
+ daddr, NXA_MASK_BIND))
+ continue;
}
if (inet->daddr) {
if (inet->daddr != saddr)
@@ -308,6 +308,7 @@ static struct sock *__udp4_lib_lookup(st
}
}
}
+
if (result)
sock_hold(result);
read_unlock(&udp_hash_lock);
@@ -329,7 +330,7 @@ static inline struct sock *udp_v4_mcast_
if (s->sk_hash != hnum ||
(inet->daddr && inet->daddr != rmt_addr) ||
(inet->dport != rmt_port && inet->dport) ||
- (inet->rcv_saddr && inet->rcv_saddr != loc_addr) ||
+ !v4_sock_addr_match(sk->sk_nx_info, inet, loc_addr) ||
ipv6_only_sock(s) ||
(s->sk_bound_dev_if && s->sk_bound_dev_if != dif))
continue;
@@ -662,7 +663,15 @@ int udp_sendmsg(struct kiocb *iocb, stru
.uli_u = { .ports =
{ .sport = inet->sport,
.dport = dport } } };
+ struct nx_info *nxi = sk->sk_nx_info;
+ struct net *net = sk->sk_net;
+
security_sk_classify_flow(sk, &fl);
+
+ err = ip_v4_find_src(net, nxi, &rt, &fl);
+ if (err)
+ goto out;
+
err = ip_route_output_flow(&init_net, &rt, &fl, sk, 1);
if (err) {
if (err == -ENETUNREACH)
@@ -905,7 +914,8 @@ try_again:
{
sin->sin_family = AF_INET;
sin->sin_port = udp_hdr(skb)->source;
- sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
+ sin->sin_addr.s_addr = nx_map_sock_lback(
+ skb->sk->sk_nx_info, ip_hdr(skb)->saddr);
memset(sin->sin_zero, 0, sizeof(sin->sin_zero));
}
if (inet->cmsg_flags)
@@ -1516,7 +1526,8 @@ static struct sock *udp_get_first(struct
for (state->bucket = 0; state->bucket < UDP_HTABLE_SIZE; ++state->bucket) {
struct hlist_node *node;
sk_for_each(sk, node, state->hashtable + state->bucket) {
- if (sk->sk_family == state->family)
+ if (sk->sk_family == state->family &&
+ nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT))
goto found;
}
}
@@ -1533,7 +1544,8 @@ static struct sock *udp_get_next(struct
sk = sk_next(sk);
try_again:
;
- } while (sk && sk->sk_family != state->family);
+ } while (sk && (sk->sk_family != state->family ||
+ !nx_check(sk->sk_nid, VS_WATCH_P | VS_IDENT)));
if (!sk && ++state->bucket < UDP_HTABLE_SIZE) {
sk = sk_head(state->hashtable + state->bucket);
@@ -1648,7 +1660,10 @@ static void udp4_format_sock(struct sock
sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p",
- bucket, src, srcp, dest, destp, sp->sk_state,
+ bucket,
+ nx_map_sock_lback(current_nx_info(), src), srcp,
+ nx_map_sock_lback(current_nx_info(), dest), destp,
+ sp->sk_state,
atomic_read(&sp->sk_wmem_alloc),
atomic_read(&sp->sk_rmem_alloc),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
--- a/net/ipv6/addrconf.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/addrconf.c 2008-04-19 16:44:23.000000000 -0400
@@ -87,6 +87,8 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/vs_network.h>
+#include <linux/vs_inet6.h>
/* Set to 3 to get tracing... */
#define ACONF_DEBUG 2
@@ -918,7 +920,8 @@ static inline int ipv6_saddr_preferred(i
}
int ipv6_dev_get_saddr(struct net_device *daddr_dev,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ struct in6_addr *daddr, struct in6_addr *saddr,
+ struct nx_info *nxi)
{
struct ipv6_saddr_score hiscore;
struct inet6_ifaddr *ifa_result = NULL;
@@ -964,6 +967,10 @@ int ipv6_dev_get_saddr(struct net_device
score.addr_type = __ipv6_addr_type(&ifa->addr);
+ /* Use only addresses assigned to the context */
+ if (!v6_ifa_in_nx_info(ifa, nxi))
+ continue;
+
/* Rule 0:
* - Tentative Address (RFC2462 section 5.4)
* - A tentative address is not considered
@@ -1181,9 +1188,10 @@ record_it:
int ipv6_get_saddr(struct dst_entry *dst,
- struct in6_addr *daddr, struct in6_addr *saddr)
+ struct in6_addr *daddr, struct in6_addr *saddr,
+ struct nx_info *nxi)
{
- return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr);
+ return ipv6_dev_get_saddr(dst ? ip6_dst_idev(dst)->dev : NULL, daddr, saddr, nxi);
}
EXPORT_SYMBOL(ipv6_get_saddr);
@@ -1287,35 +1295,46 @@ struct inet6_ifaddr *ipv6_get_ifaddr(str
return ifp;
}
+extern int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2);
+
int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
{
const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
- __be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
__be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
- if (!sk2_rcv_saddr && !sk_ipv6only)
+ /* FIXME: needs handling for v4 ANY */
+ if (!sk2_rcv_saddr && !sk_ipv6only && !sk2->sk_nx_info)
return 1;
if (addr_type2 == IPV6_ADDR_ANY &&
- !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
+ !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED) &&
+ v6_addr_in_nx_info(sk2->sk_nx_info, sk_rcv_saddr6, -1))
return 1;
if (addr_type == IPV6_ADDR_ANY &&
- !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+ !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED) &&
+ (sk2_rcv_saddr6 && v6_addr_in_nx_info(sk->sk_nx_info, sk2_rcv_saddr6, -1)))
+ return 1;
+
+ if (addr_type == IPV6_ADDR_ANY &&
+ addr_type2 == IPV6_ADDR_ANY &&
+ nx_v6_addr_conflict(sk->sk_nx_info, sk2->sk_nx_info))
return 1;
if (sk2_rcv_saddr6 &&
+ addr_type != IPV6_ADDR_ANY &&
+ addr_type2 != IPV6_ADDR_ANY &&
ipv6_addr_equal(sk_rcv_saddr6, sk2_rcv_saddr6))
return 1;
if (addr_type == IPV6_ADDR_MAPPED &&
!sk2_ipv6only &&
- (!sk2_rcv_saddr || !sk_rcv_saddr || sk_rcv_saddr == sk2_rcv_saddr))
+ ipv4_rcv_saddr_equal(sk, sk2))
return 1;
return 0;
@@ -2846,7 +2865,10 @@ static void if6_seq_stop(struct seq_file
static int if6_seq_show(struct seq_file *seq, void *v)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
- seq_printf(seq,
+
+ if (nx_check(0, VS_ADMIN|VS_WATCH) ||
+ v6_addr_in_nx_info(current_nx_info(), &ifp->addr, -1))
+ seq_printf(seq,
NIP6_SEQFMT " %02x %02x %02x %02x %8s\n",
NIP6(ifp->addr),
ifp->idev->dev->ifindex,
@@ -3337,6 +3359,11 @@ static int inet6_dump_addr(struct sk_buf
struct inet6_ifaddr *ifa;
struct ifmcaddr6 *ifmca;
struct ifacaddr6 *ifaca;
+ struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
+
+ /* disable ipv6 on non v6 guests */
+ if (nxi && !nx_info_has_v6(nxi))
+ return skb->len;
s_idx = cb->args[0];
s_ip_idx = ip_idx = cb->args[1];
@@ -3358,6 +3385,8 @@ static int inet6_dump_addr(struct sk_buf
ifa = ifa->if_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
+ if (!v6_addr_in_nx_info(nxi, &ifa->addr, -1))
+ continue;
err = inet6_fill_ifaddr(skb, ifa,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
@@ -3371,6 +3400,8 @@ static int inet6_dump_addr(struct sk_buf
ifmca = ifmca->next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
+ if (!v6_addr_in_nx_info(nxi, &ifmca->mca_addr, -1))
+ continue;
err = inet6_fill_ifmcaddr(skb, ifmca,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
@@ -3384,6 +3415,8 @@ static int inet6_dump_addr(struct sk_buf
ifaca = ifaca->aca_next, ip_idx++) {
if (ip_idx < s_ip_idx)
continue;
+ if (!v6_addr_in_nx_info(nxi, &ifaca->aca_addr, -1))
+ continue;
err = inet6_fill_ifacaddr(skb, ifaca,
NETLINK_CB(cb->skb).pid,
cb->nlh->nlmsg_seq,
@@ -3678,6 +3711,11 @@ static int inet6_dump_ifinfo(struct sk_b
int s_idx = cb->args[0];
struct net_device *dev;
struct inet6_dev *idev;
+ struct nx_info *nxi = skb->sk ? skb->sk->sk_nx_info : NULL;
+
+ /* FIXME: maybe disable ipv6 on non v6 guests?
+ if (skb->sk && skb->sk->sk_vx_info)
+ return skb->len; */
if (net != &init_net)
return 0;
@@ -3687,6 +3725,8 @@ static int inet6_dump_ifinfo(struct sk_b
for_each_netdev(&init_net, dev) {
if (idx < s_idx)
goto cont;
+ if (!v6_dev_in_nx_info(dev, nxi))
+ goto cont;
if ((idev = in6_dev_get(dev)) == NULL)
goto cont;
err = inet6_fill_ifinfo(skb, idev, NETLINK_CB(cb->skb).pid,
--- a/net/ipv6/af_inet6.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/af_inet6.c 2008-04-19 15:14:52.000000000 -0400
@@ -43,6 +43,8 @@
#include <linux/netdevice.h>
#include <linux/icmpv6.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -51,6 +53,7 @@
#include <net/tcp.h>
#include <net/ipip.h>
#include <net/protocol.h>
+#include <net/route.h>
#include <net/inet_common.h>
#include <net/transp_v6.h>
#include <net/ip6_route.h>
@@ -148,9 +151,12 @@ lookup_protocol:
}
err = -EPERM;
+ if ((protocol == IPPROTO_ICMPV6) &&
+ nx_capable(answer->capability, NXC_RAW_ICMP))
+ goto override;
if (answer->capability > 0 && !capable(answer->capability))
goto out_rcu_unlock;
-
+override:
sock->ops = answer->ops;
answer_prot = answer->prot;
answer_no_check = answer->no_check;
@@ -248,6 +254,7 @@ int inet6_bind(struct socket *sock, stru
struct sock *sk = sock->sk;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
+ struct nx_v6_sock_addr nsa;
__be32 v4addr = 0;
unsigned short snum;
int addr_type = 0;
@@ -259,6 +266,11 @@ int inet6_bind(struct socket *sock, stru
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
+
+ err = v6_map_sock_addr(inet, addr, &nsa);
+ if (err)
+ return err;
+
addr_type = ipv6_addr_type(&addr->sin6_addr);
if ((addr_type & IPV6_ADDR_MULTICAST) && sock->type == SOCK_STREAM)
return -EINVAL;
@@ -282,6 +294,10 @@ int inet6_bind(struct socket *sock, stru
err = -EADDRNOTAVAIL;
goto out;
}
+ if (!v4_addr_in_nx_info(sk->sk_nx_info, v4addr, NXA_MASK_BIND)) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
} else {
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
@@ -307,6 +323,11 @@ int inet6_bind(struct socket *sock, stru
}
}
+ if (!v6_addr_in_nx_info(sk->sk_nx_info, &addr->sin6_addr, -1)) {
+ err = -EADDRNOTAVAIL;
+ goto out;
+ }
+
/* ipv4 addr of the socket is invalid. Only the
* unspecified and mapped address have a v4 equivalent.
*/
@@ -325,6 +346,8 @@ int inet6_bind(struct socket *sock, stru
}
}
+ v6_set_sock_addr(inet, &nsa);
+
inet->rcv_saddr = v4addr;
inet->saddr = v4addr;
@@ -419,9 +442,11 @@ int inet6_getname(struct socket *sock, s
return -ENOTCONN;
sin->sin6_port = inet->dport;
ipv6_addr_copy(&sin->sin6_addr, &np->daddr);
+ /* FIXME: remap lback? */
if (np->sndflow)
sin->sin6_flowinfo = np->flow_label;
} else {
+ /* FIXME: remap lback? */
if (ipv6_addr_any(&np->rcv_saddr))
ipv6_addr_copy(&sin->sin6_addr, &np->saddr);
else
--- a/net/ipv6/fib6_rules.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/fib6_rules.c 2008-04-19 15:14:52.000000000 -0400
@@ -86,7 +86,7 @@ static int fib6_rule_action(struct fib_r
r->src.plen && !(flags & RT6_LOOKUP_F_HAS_SADDR)) {
struct in6_addr saddr;
if (ipv6_get_saddr(&rt->u.dst, &flp->fl6_dst,
- &saddr))
+ &saddr, NULL))
goto again;
if (!ipv6_prefix_equal(&saddr, &r->src.addr,
r->src.plen))
--- a/net/ipv6/inet6_hashtables.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/inet6_hashtables.c 2008-04-19 15:14:52.000000000 -0400
@@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/random.h>
+#include <linux/vs_inet6.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
@@ -114,6 +115,9 @@ struct sock *inet6_lookup_listener(struc
if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
continue;
score++;
+ } else {
+ if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+ continue;
}
if (sk->sk_bound_dev_if) {
if (sk->sk_bound_dev_if != dif)
--- a/net/ipv6/ip6_output.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/ip6_output.c 2008-04-19 15:14:52.000000000 -0400
@@ -920,7 +920,7 @@ static int ip6_dst_lookup_tail(struct so
goto out_err_release;
if (ipv6_addr_any(&fl->fl6_src)) {
- err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src);
+ err = ipv6_get_saddr(*dst, &fl->fl6_dst, &fl->fl6_src, sk->sk_nx_info);
if (err)
goto out_err_release;
}
--- a/net/ipv6/Kconfig 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/Kconfig 2008-04-19 15:14:52.000000000 -0400
@@ -4,8 +4,8 @@
# IPv6 as module will cause a CRASH if you try to unload it
config IPV6
- tristate "The IPv6 protocol"
- default m
+ bool "The IPv6 protocol"
+ default n
---help---
This is complemental support for the IP version 6.
You will still be able to do traditional IPv4 networking as well.
--- a/net/ipv6/ndisc.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/ndisc.c 2008-04-19 15:14:52.000000000 -0400
@@ -563,7 +563,7 @@ static void ndisc_send_na(struct net_dev
override = 0;
in6_ifa_put(ifp);
} else {
- if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr))
+ if (ipv6_dev_get_saddr(dev, daddr, &tmpaddr, NULL))
return;
src_addr = &tmpaddr;
}
--- a/net/ipv6/route.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/route.c 2008-04-19 15:14:52.000000000 -0400
@@ -2122,7 +2122,7 @@ static int rt6_fill_node(struct sk_buff
NLA_PUT_U32(skb, RTA_IIF, iif);
else if (dst) {
struct in6_addr saddr_buf;
- if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf) == 0)
+ if (ipv6_get_saddr(&rt->u.dst, dst, &saddr_buf, (skb->sk ? skb->sk->sk_nx_info : NULL)) == 0)
NLA_PUT(skb, RTA_PREFSRC, 16, &saddr_buf);
}
--- a/net/ipv6/tcp_ipv6.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/tcp_ipv6.c 2008-04-19 15:14:52.000000000 -0400
@@ -68,6 +68,7 @@
#include <linux/crypto.h>
#include <linux/scatterlist.h>
+#include <linux/vs_inet6.h>
/* Socket used for sending RSTs and ACKs */
static struct socket *tcp6_socket;
@@ -154,8 +155,15 @@ static int tcp_v6_connect(struct sock *s
* connect() to INADDR_ANY means loopback (BSD'ism).
*/
- if(ipv6_addr_any(&usin->sin6_addr))
- usin->sin6_addr.s6_addr[15] = 0x1;
+ if(ipv6_addr_any(&usin->sin6_addr)) {
+ struct nx_info *nxi = sk->sk_nx_info;
+
+ if (nxi && nx_info_has_v6(nxi))
+ /* FIXME: remap lback? */
+ usin->sin6_addr = nxi->v6.ip;
+ else
+ usin->sin6_addr.s6_addr[15] = 0x1;
+ }
addr_type = ipv6_addr_type(&usin->sin6_addr);
--- a/net/ipv6/udp.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/udp.c 2008-04-19 15:14:52.000000000 -0400
@@ -49,6 +49,7 @@
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/vs_inet6.h>
#include "udp_impl.h"
static inline int udp_v6_get_port(struct sock *sk, unsigned short snum)
@@ -83,6 +84,10 @@ static struct sock *__udp6_lib_lookup(st
if (!ipv6_addr_equal(&np->rcv_saddr, daddr))
continue;
score++;
+ } else {
+ /* block non nx_info ips */
+ if (!v6_addr_in_nx_info(sk->sk_nx_info, daddr, -1))
+ continue;
}
if (!ipv6_addr_any(&np->daddr)) {
if (!ipv6_addr_equal(&np->daddr, saddr))
--- a/net/ipv6/xfrm6_policy.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/ipv6/xfrm6_policy.c 2008-04-19 16:13:21.000000000 -0400
@@ -58,7 +58,7 @@ static int xfrm6_get_saddr(xfrm_address_
return -EHOSTUNREACH;
ipv6_get_saddr(dst, (struct in6_addr *)&daddr->a6,
- (struct in6_addr *)&saddr->a6);
+ (struct in6_addr *)&saddr->a6, NULL);
dst_release(dst);
return 0;
}
--- a/net/netlink/af_netlink.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/netlink/af_netlink.c 2008-04-19 15:14:52.000000000 -0400
@@ -56,6 +56,9 @@
#include <linux/audit.h>
#include <linux/selinux.h>
#include <linux/mutex.h>
+#include <linux/vs_context.h>
+#include <linux/vs_network.h>
+#include <linux/vs_limit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
--- a/net/sctp/ipv6.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/sctp/ipv6.c 2008-04-19 15:14:52.000000000 -0400
@@ -316,7 +316,7 @@ static void sctp_v6_get_saddr(struct sct
__FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr));
if (!asoc) {
- ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr);
+ ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr, NULL);
SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " NIP6_FMT "\n",
NIP6(saddr->v6.sin6_addr));
return;
--- a/net/socket.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/socket.c 2008-04-19 15:14:52.000000000 -0400
@@ -93,6 +93,10 @@
#include <net/sock.h>
#include <linux/netfilter.h>
+#include <linux/vs_base.h>
+#include <linux/vs_socket.h>
+#include <linux/vs_inet.h>
+#include <linux/vs_inet6.h>
static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
@@ -547,7 +551,7 @@ static inline int __sock_sendmsg(struct
struct msghdr *msg, size_t size)
{
struct sock_iocb *si = kiocb_to_siocb(iocb);
- int err;
+ int err, len;
si->sock = sock;
si->scm = NULL;
@@ -558,7 +562,22 @@ static inline int __sock_sendmsg(struct
if (err)
return err;
- return sock->ops->sendmsg(iocb, sock, msg, size);
+ len = sock->ops->sendmsg(iocb, sock, msg, size);
+ if (sock->sk) {
+ if (len == size)
+ vx_sock_send(sock->sk, size);
+ else
+ vx_sock_fail(sock->sk, size);
+ }
+ vxdprintk(VXD_CBIT(net, 7),
+ "__sock_sendmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+ sock, sock->sk,
+ (sock->sk)?sock->sk->sk_nx_info:0,
+ (sock->sk)?sock->sk->sk_vx_info:0,
+ (sock->sk)?sock->sk->sk_xid:0,
+ (sock->sk)?sock->sk->sk_nid:0,
+ (unsigned int)size, len);
+ return len;
}
int sock_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
@@ -627,7 +646,7 @@ EXPORT_SYMBOL_GPL(__sock_recv_timestamp)
static inline int __sock_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
- int err;
+ int err, len;
struct sock_iocb *si = kiocb_to_siocb(iocb);
si->sock = sock;
@@ -640,7 +659,18 @@ static inline int __sock_recvmsg(struct
if (err)
return err;
- return sock->ops->recvmsg(iocb, sock, msg, size, flags);
+ len = sock->ops->recvmsg(iocb, sock, msg, size, flags);
+ if ((len >= 0) && sock->sk)
+ vx_sock_recv(sock->sk, len);
+ vxdprintk(VXD_CBIT(net, 7),
+ "__sock_recvmsg: %p[%p,%p,%p;%d/%d]:%d/%d",
+ sock, sock->sk,
+ (sock->sk)?sock->sk->sk_nx_info:0,
+ (sock->sk)?sock->sk->sk_vx_info:0,
+ (sock->sk)?sock->sk->sk_xid:0,
+ (sock->sk)?sock->sk->sk_nid:0,
+ (unsigned int)size, len);
+ return len;
}
int sock_recvmsg(struct socket *sock, struct msghdr *msg,
@@ -1105,6 +1135,13 @@ static int __sock_create(struct net *net
if (type < 0 || type >= SOCK_MAX)
return -EINVAL;
+ if (!nx_check(0, VS_ADMIN)) {
+ if (family == PF_INET && !current_nx_info_has_v4())
+ return -EAFNOSUPPORT;
+ if (family == PF_INET6 && !current_nx_info_has_v6())
+ return -EAFNOSUPPORT;
+ }
+
/* Compatibility.
This uglymoron is moved from INET layer to here to avoid
@@ -1222,6 +1259,7 @@ asmlinkage long sys_socket(int family, i
if (retval < 0)
goto out;
+ set_bit(SOCK_USER_SOCKET, &sock->flags);
retval = sock_map_fd(sock);
if (retval < 0)
goto out_release;
@@ -1254,10 +1292,12 @@ asmlinkage long sys_socketpair(int famil
err = sock_create(family, type, protocol, &sock1);
if (err < 0)
goto out;
+ set_bit(SOCK_USER_SOCKET, &sock1->flags);
err = sock_create(family, type, protocol, &sock2);
if (err < 0)
goto out_release_1;
+ set_bit(SOCK_USER_SOCKET, &sock2->flags);
err = sock1->ops->socketpair(sock1, sock2);
if (err < 0)
--- a/net/sunrpc/auth.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/sunrpc/auth.c 2008-04-19 15:14:52.000000000 -0400
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/sunrpc/clnt.h>
#include <linux/spinlock.h>
+#include <linux/vs_tag.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
@@ -345,6 +346,7 @@ rpcauth_lookupcred(struct rpc_auth *auth
struct auth_cred acred = {
.uid = current->fsuid,
.gid = current->fsgid,
+ .tag = dx_current_tag(),
.group_info = current->group_info,
};
struct rpc_cred *ret;
@@ -382,6 +384,7 @@ rpcauth_bindcred(struct rpc_task *task)
struct auth_cred acred = {
.uid = current->fsuid,
.gid = current->fsgid,
+ .tag = dx_current_tag(),
.group_info = current->group_info,
};
struct rpc_cred *ret;
--- a/net/sunrpc/auth_unix.c 2008-04-17 10:37:27.000000000 -0400
+++ a/net/sunrpc/auth_unix.c 2008-04-19 15:14:52.000000000 -0400
@@ -11,12 +11,14 @@
#include <linux/module.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/auth.h>
+#include <linux/vs_tag.h>
#define NFS_NGROUPS 16
struct unx_cred {
struct rpc_cred uc_base;
gid_t uc_gid;
+ tag_t uc_tag;
gid_t uc_gids[NFS_NGROUPS];
};
#define uc_uid uc_base.cr_uid
@@ -73,6 +75,7 @@ unx_create_cred(struct rpc_auth *auth, s
if (flags & RPCAUTH_LOOKUP_ROOTCREDS) {
cred->uc_uid = 0;
cred->uc_gid = 0;
+ cred->uc_tag = dx_current_tag();
cred->uc_gids[0] = NOGROUP;
} else {
int groups = acred->group_info->ngroups;
@@ -80,6 +83,7 @@ unx_create_cred(struct rpc_auth *auth, s
groups = NFS_NGROUPS;
cred->uc_gid = acred->gid;
+ cred->uc_tag = acred->tag;
for (i = 0; i < groups; i++)
cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
if (i < NFS_NGROUPS)
@@ -124,7 +128,8 @@ unx_match(struct auth_cred *acred, struc
int groups;
if (cred->uc_uid != acred->uid
- || cred->uc_gid != acred->gid)
+ || cred->uc_gid != acred->gid
+ || cred->uc_tag != acred->tag)
return 0;
groups = acred->group_info->ngroups;
@@ -150,7 +155,7 @@ unx_marshal(struct rpc_task *task, __be3
struct rpc_clnt *clnt = task->tk_client;
struct unx_cred *cred = container_of(task->tk_msg.rpc_cred, struct unx_cred, uc_base);
__be32 *base, *hold;
- int i;
+ int i, tag;
*p++ = htonl(RPC_AUTH_UNIX);
base = p++;
@@ -160,9 +165,12 @@ unx_marshal(struct rpc_task *task, __be3
* Copy the UTS nodename captured when the client was created.
*/
p = xdr_encode_array(p, clnt->cl_nodename, clnt->cl_nodelen);
+ tag = task->tk_client->cl_tag;
- *p++ = htonl((u32) cred->uc_uid);
- *p++ = htonl((u32) cred->uc_gid);
+ *p++ = htonl((u32) TAGINO_UID(tag,
+ cred->uc_uid, cred->uc_tag));
+ *p++ = htonl((u32) TAGINO_GID(tag,
+ cred->uc_gid, cred->uc_tag));
hold = p++;
for (i = 0; i < 16 && cred->uc_gids[i] != (gid_t) NOGROUP; i++)
*p++ = htonl((u32) cred->uc_gids[i]);
--- a/net/sunrpc/clnt.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/sunrpc/clnt.c 2008-04-19 16:09:36.000000000 -0400
@@ -31,6 +31,7 @@
#include <linux/utsname.h>
#include <linux/workqueue.h>
#include <linux/in6.h>
+#include <linux/vs_cvirt.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
@@ -324,7 +325,9 @@ struct rpc_clnt *rpc_create(struct rpc_c
clnt->cl_autobind = 1;
if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
clnt->cl_discrtry = 1;
-
+ /* TODO: handle RPC_CLNT_CREATE_TAGGED
+ if (args->flags & RPC_CLNT_CREATE_TAGGED)
+ clnt->cl_tag = 1; */
return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);
--- a/net/unix/af_unix.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/unix/af_unix.c 2008-04-19 15:55:39.000000000 -0400
@@ -116,6 +116,8 @@
#include <linux/mount.h>
#include <net/checksum.h>
#include <linux/security.h>
+#include <linux/vs_context.h>
+#include <linux/vs_limit.h>
static struct hlist_head unix_socket_table[UNIX_HASH_SIZE + 1];
static DEFINE_SPINLOCK(unix_table_lock);
@@ -255,6 +257,8 @@ static struct sock *__unix_find_socket_b
if (s->sk_net != net)
continue;
+ if (!nx_check(s->sk_nid, VS_WATCH_P | VS_IDENT))
+ continue;
if (u->addr->len == len &&
!memcmp(u->addr->name, sunname, len))
goto found;
@@ -819,7 +823,7 @@ static int unix_bind(struct socket *sock
*/
mode = S_IFSOCK |
(SOCK_INODE(sock)->i_mode & ~current->fs->umask);
- err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
+ err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0, NULL);
if (err)
goto out_mknod_dput;
mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
--- a/net/x25/af_x25.c 2008-04-17 12:05:45.000000000 -0400
+++ a/net/x25/af_x25.c 2008-04-19 15:14:52.000000000 -0400
@@ -506,7 +506,10 @@ static int x25_create(struct net *net, s
x25 = x25_sk(sk);
- sock_init_data(sock, sk);
+ sk->sk_socket = sock;
+ sk->sk_type = sock->type;
+ sk->sk_sleep = &sock->wait;
+ sock->sk = sk;
x25_init_timers(sk);
--- a/scripts/checksyscalls.sh 2008-04-17 11:31:42.000000000 -0400
+++ a/scripts/checksyscalls.sh 2008-04-19 15:14:52.000000000 -0400
@@ -108,7 +108,6 @@ cat << EOF
#define __IGNORE_afs_syscall
#define __IGNORE_getpmsg
#define __IGNORE_putpmsg
-#define __IGNORE_vserver
EOF
}
--- a/security/commoncap.c 2008-04-17 12:05:46.000000000 -0400
+++ a/security/commoncap.c 2008-04-23 22:22:54.000000000 -0400
@@ -24,6 +24,7 @@
#include <linux/hugetlb.h>
#include <linux/mount.h>
#include <linux/sched.h>
+#include <linux/vs_context.h>
/* Global security state */
@@ -32,7 +33,7 @@ EXPORT_SYMBOL(securebits);
int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
{
- NETLINK_CB(skb).eff_cap = current->cap_effective;
+ NETLINK_CB(skb).eff_cap = vx_mbcaps(current->cap_effective);
return 0;
}
@@ -53,9 +54,24 @@ EXPORT_SYMBOL(cap_netlink_recv);
*/
int cap_capable (struct task_struct *tsk, int cap)
{
+ struct vx_info *vxi = tsk->vx_info;
+
+#if 0
+ printk("cap_capable() VXF_STATE_SETUP = %llx, raised = %x, eff = %08x:%08x\n",
+ vx_info_flags(vxi, VXF_STATE_SETUP, 0),
+ cap_raised(tsk->cap_effective, cap),
+ tsk->cap_effective.cap[1], tsk->cap_effective.cap[0]);
+#endif
+
+ /* special case SETUP */
+ if (vx_info_flags(vxi, VXF_STATE_SETUP, 0) &&
+ cap_raised(tsk->cap_effective, cap))
+ return 0;
+
/* Derived from include/linux/sched.h:capable. */
- if (cap_raised(tsk->cap_effective, cap))
+ if (vx_cap_raised(vxi, tsk->cap_effective, cap))
return 0;
+
return -EPERM;
}
@@ -583,7 +599,8 @@ void cap_task_reparent_to_init (struct t
int cap_syslog (int type)
{
- if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
+ if ((type != 3 && type != 10) &&
+ !vx_capable(CAP_SYS_ADMIN, VXC_SYSLOG))
return -EPERM;
return 0;
}
--- a/security/dummy.c 2008-04-17 12:05:46.000000000 -0400
+++ a/security/dummy.c 2008-04-23 20:28:54.000000000 -0400
@@ -27,6 +27,7 @@
#include <linux/hugetlb.h>
#include <linux/ptrace.h>
#include <linux/file.h>
+#include <linux/vs_context.h>
static int dummy_ptrace (struct task_struct *parent, struct task_struct *child)
{
@@ -714,7 +715,7 @@ static int dummy_sem_semop (struct sem_a
static int dummy_netlink_send (struct sock *sk, struct sk_buff *skb)
{
- NETLINK_CB(skb).eff_cap = current->cap_effective;
+ NETLINK_CB(skb).eff_cap = vx_mbcaps(current->cap_effective);
return 0;
}
--- a/security/selinux/hooks.c 2008-05-21 14:30:05.000000000 -0400
+++ a/security/selinux/hooks.c 2008-05-21 14:30:41.000000000 -0400
@@ -64,7 +64,6 @@
#include <linux/dccp.h>
#include <linux/quota.h>
#include <linux/un.h> /* for Unix socket types */
-#include <net/af_unix.h> /* for Unix socket types */
#include <linux/parser.h>
#include <linux/nfs_mount.h>
#include <net/ipv6.h>