Index: sys/sysctl.h
===================================================================
RCS file: /cvsroot/src/sys/sys/sysctl.h,v
retrieving revision 1.215
diff -u -p -r1.215 sysctl.h
--- sys/sysctl.h	4 Jan 2015 22:11:40 -0000	1.215
+++ sys/sysctl.h	21 Sep 2015 21:20:26 -0000
@@ -389,6 +389,8 @@ struct clockinfo {
 #define	KERN_PROC_RUID		 6	/* by real uid */
 #define	KERN_PROC_GID		 7	/* by effective gid */
 #define	KERN_PROC_RGID		 8	/* by real gid */
+#define	KERN_PROC_PATHNAME	 9	/* path to executable */
+#define	KERN_PROC_VMMAP		10	/* VM map entries */
 
 /*
  * KERN_PROC_TTY sub-subtypes
@@ -804,6 +806,61 @@ struct kinfo_file {
 #define	KERN_FILESLOP	10
 
 /*
+ * The KERN_PROC_VMMAP sysctl allows a process to dump the VM layout of
+ * another process as a series of entries.
+ */
+#define	KVME_TYPE_NONE		0	/* no backing object */
+#define	KVME_TYPE_OBJECT	1	/* generic uvm object */
+#define	KVME_TYPE_VNODE		2	/* vnode-backed mapping */
+#define	KVME_TYPE_KERN		3	/* kernel object */
+#define	KVME_TYPE_DEVICE	4	/* device mapping */
+#define	KVME_TYPE_ANON		5	/* anonymous memory */
+#define	KVME_TYPE_SUBMAP	6	/* submap entry */
+#define	KVME_TYPE_UNKNOWN	255	/* unclassified entry */
+
+#define	KVME_PROT_READ		0x00000001
+#define	KVME_PROT_WRITE		0x00000002
+#define	KVME_PROT_EXEC		0x00000004
+
+#define	KVME_FLAG_COW		0x00000001
+#define	KVME_FLAG_NEEDS_COPY	0x00000002
+#define	KVME_FLAG_NOCOREDUMP	0x00000004
+#define	KVME_FLAG_PAGEABLE	0x00000008
+#define	KVME_FLAG_GROWS_UP	0x00000010
+#define	KVME_FLAG_GROWS_DOWN	0x00000020
+
+struct kinfo_vmentry {
+	uint64_t kve_start;		/* Starting address. */
+	uint64_t kve_end;		/* Finishing address. */
+	uint64_t kve_offset;		/* Mapping offset in object */
+
+	uint32_t kve_type;		/* Type of map entry. */
+	uint32_t kve_flags;		/* Flags on map entry. */
+
+	uint32_t kve_count;		/* Number of pages/entries */
+	uint32_t kve_wired_count;	/* Number of wired pages */
+
+	uint32_t kve_advice;		/* Advice */
+	uint32_t kve_attributes;	/* Map attribute */
+
+	uint32_t kve_protection;	/* Protection bitmask. */
+	uint32_t kve_max_protection;	/* Max protection bitmask */
+
+	uint32_t kve_ref_count;		/* VM obj ref count. */
+	uint32_t kve_inheritance;	/* Inheritance */
+
+	uint64_t kve_vn_fileid;		/* inode number if vnode */
+	uint64_t kve_vn_size;		/* File size. */
+	uint64_t kve_vn_fsid;		/* dev_t of vnode location */
+	uint64_t kve_vn_rdev;		/* Device id if device. */
+
+	uint32_t kve_vn_type;		/* Vnode type. */
+	uint32_t kve_vn_mode;		/* File mode. */
+
+	char kve_path[PATH_MAX];	/* Path to VM obj, if any. */
+};
+
+/*
  * kern.evcnt returns an array of these structures, which are designed both to
  * be immune to 32/64 bit emulation issues. Note that the struct here differs
  * from the real struct evcnt but contains the same information in order to
Index: kern/kern_proc.c
===================================================================
RCS file: /cvsroot/src/sys/kern/kern_proc.c,v
retrieving revision 1.193
diff -u -p -r1.193 kern_proc.c
--- kern/kern_proc.c	12 Jul 2014 09:57:25 -0000	1.193
+++ kern/kern_proc.c	21 Sep 2015 21:20:26 -0000
@@ -97,10 +97,13 @@ __KERNEL_RCSID(0, "$NetBSD: kern_proc.c,
 
 #include 
 #include 
 #include 
+#include 	/* XXX(extraction): header name lost -- restore from original patch */
+#include 	/* XXX(extraction): header name lost -- restore from original patch */
 #include 
 #include 
 #include 
+#include 	/* XXX(extraction): header name lost -- restore from original patch */
 
 #ifdef COMPAT_NETBSD32
 #include 
@@ -234,6 +237,9 @@ static pool_cache_t proc_cache;
 
 static kauth_listener_t proc_listener;
 
+static int	fill_pathname(struct lwp *, pid_t, void *, size_t *);
+static int	fill_vmentries(struct lwp *, pid_t, u_int, void *, size_t *);
+
 static int
 proc_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
     void *arg0, void *arg1, void *arg2, void *arg3)
@@ -387,6 +393,8 @@ procinit_sysctl(void)
 	  KERN_PROC_RUID uid
 	  KERN_PROC_GID gid
 	  KERN_PROC_RGID gid
+	  KERN_PROC_PATHNAME path[MAXPATHLEN]
+	  KERN_PROC_VMMAP struct kinfo_vmentry
 
 	  all in all, probably not worth the effort...
*/ @@ -1633,18 +1641,42 @@ sysctl_doeproc(SYSCTLFN_ARGS) type = rnode->sysctl_num; if (type == KERN_PROC) { - if (namelen != 2 && !(namelen == 1 && name[0] == KERN_PROC_ALL)) - return (EINVAL); - op = name[0]; - if (op != KERN_PROC_ALL) + if (namelen == 0) + return EINVAL; + switch (op = name[0]) { + case KERN_PROC_PATHNAME: + if (namelen != 2) + return EINVAL; + sysctl_unlock(); + error = fill_pathname(l, name[1], oldp, oldlenp); + sysctl_relock(); + return error; + + case KERN_PROC_VMMAP: + if (namelen != 3) + return EINVAL; + sysctl_unlock(); + error = fill_vmentries(l, name[1], name[2], + oldp, oldlenp); + sysctl_relock(); + return error; + + case KERN_PROC_ALL: + if (namelen != 1) + return EINVAL; + arg = 0; + break; + default: + if (namelen != 2) + return EINVAL; arg = name[1]; - else - arg = 0; /* Quell compiler warning */ + break; + } elem_count = 0; /* Ditto */ kelem_size = elem_size = sizeof(kbuf->kproc); } else { if (namelen != 4) - return (EINVAL); + return EINVAL; op = name[0]; arg = name[1]; elem_size = name[2]; @@ -2386,3 +2418,205 @@ fill_kproc2(struct proc *p, struct kinfo ki->p_uctime_usec = ut.tv_usec; } } + + +static int +find_proc(struct lwp *l, struct proc **p, pid_t pid) +{ + int error; + + mutex_enter(proc_lock); + if (pid == -1) + *p = l->l_proc; + else + *p = proc_find(pid); + + if (*p == NULL) { + if (pid != -1) + mutex_exit(proc_lock); + return ESRCH; + } + if (pid != -1) + mutex_enter((*p)->p_lock); + mutex_exit(proc_lock); + + error = kauth_authorize_process(l->l_cred, + KAUTH_PROCESS_CANSEE, *p, + KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL); + if (error) { + if (pid != -1) + mutex_exit((*p)->p_lock); + } + return error; +} + +static int +fill_pathname(struct lwp *l, pid_t pid, void *oldp, size_t *oldlenp) +{ +#ifndef _RUMPKERNEL + int error; + struct proc *p; + char *path; + size_t len; + + if ((error = find_proc(l, &p, pid)) != 0) + return error; + + if (p->p_textvp == NULL) { + if (pid != -1) + mutex_exit(p->p_lock); + 
return ENOENT; + } + + path = PNBUF_GET(); + error = vnode_to_path(path, MAXPATHLEN / 2, p->p_textvp, l, p); + if (error) + goto out; + + len = strlen(path) + 1; + if (oldp != NULL) { + error = sysctl_copyout(l, path, oldp, *oldlenp); + if (error == 0 && *oldlenp < len) + error = ENOSPC; + } + *oldlenp = len; +out: + PNBUF_PUT(path); + if (pid != -1) + mutex_exit(p->p_lock); + return error; +#else + return 0; +#endif +} + +static int +fill_vmentry(struct lwp *l, struct proc *p, struct kinfo_vmentry *kve, + struct vm_map *m, struct vm_map_entry *e) +{ +#ifndef _RUMPKERNEL + int error; + + memset(kve, 0, sizeof(*kve)); + KASSERT(e != NULL); + if (UVM_ET_ISOBJ(e)) { + struct uvm_object *uobj = e->object.uvm_obj; + KASSERT(uobj != NULL); + kve->kve_ref_count = uobj->uo_refs; + kve->kve_count = uobj->uo_npages; + if (UVM_OBJ_IS_VNODE(uobj)) { + struct vattr va; + struct vnode *vp = (struct vnode *)uobj; + vn_lock(vp, LK_SHARED | LK_RETRY); + error = VOP_GETATTR(vp, &va, l->l_cred); + VOP_UNLOCK(vp); + kve->kve_type = KVME_TYPE_VNODE; + if (error == 0) { + kve->kve_vn_size = vp->v_size; + kve->kve_vn_type = (int)vp->v_type; + kve->kve_vn_mode = va.va_mode; + kve->kve_vn_rdev = va.va_rdev; + kve->kve_vn_fileid = va.va_fileid; + kve->kve_vn_fsid = va.va_fsid; + error = vnode_to_path(kve->kve_path, + sizeof(kve->kve_path) / 2, vp, l, p); +#ifdef DIAGNOSTIC + if (error) + printf("%s: vp %p error %d\n", __func__, + vp, error); +#endif + } + } else if (UVM_OBJ_IS_KERN_OBJECT(uobj)) { + kve->kve_type = KVME_TYPE_KERN; + } else if (UVM_OBJ_IS_DEVICE(uobj)) { + kve->kve_type = KVME_TYPE_DEVICE; + } else if (UVM_OBJ_IS_AOBJ(uobj)) { + kve->kve_type = KVME_TYPE_ANON; + } else { + kve->kve_type = KVME_TYPE_OBJECT; + } + } else if (UVM_ET_ISSUBMAP(e)) { + struct vm_map *map = e->object.sub_map; + KASSERT(map != NULL); + kve->kve_ref_count = map->ref_count; + kve->kve_count = map->nentries; + kve->kve_type = KVME_TYPE_SUBMAP; + } else + kve->kve_type = KVME_TYPE_UNKNOWN; + + 
kve->kve_start = e->start; + kve->kve_end = e->end; + kve->kve_offset = e->offset; + kve->kve_wired_count = e->wired_count; + kve->kve_inheritance = e->inheritance; + kve->kve_attributes = e->map_attrib; + kve->kve_advice = e->advice; +#define PROT(p) (((p) & VM_PROT_READ) ? KVME_PROT_READ : 0) | \ + (((p) & VM_PROT_WRITE) ? KVME_PROT_WRITE : 0) | \ + (((p) & VM_PROT_EXECUTE) ? KVME_PROT_EXEC : 0) + kve->kve_protection = PROT(e->protection); + kve->kve_max_protection = PROT(e->max_protection); + kve->kve_flags |= (e->etype & UVM_ET_COPYONWRITE) + ? KVME_FLAG_COW : 0; + kve->kve_flags |= (e->etype & UVM_ET_NEEDSCOPY) + ? KVME_FLAG_NEEDS_COPY : 0; + kve->kve_flags |= (m->flags & VM_MAP_TOPDOWN) + ? KVME_FLAG_GROWS_DOWN : KVME_FLAG_GROWS_UP; + kve->kve_flags |= (m->flags & VM_MAP_PAGEABLE) + ? KVME_FLAG_PAGEABLE : 0; +#endif + return 0; +} + +static int +fill_vmentries(struct lwp *l, pid_t pid, u_int elem_size, void *oldp, + size_t *oldlenp) +{ + int error; + struct proc *p; + struct kinfo_vmentry vme; + struct vmspace *vm; + struct vm_map *map; + struct vm_map_entry *entry; + char *dp; + int count; + + count = 0; + + if ((error = find_proc(l, &p, pid)) != 0) + return error; + + if ((error = proc_vmspace_getref(p, &vm)) != 0) + goto out; + + map = &vm->vm_map; + vm_map_lock_read(map); + + dp = oldp; + for (entry = map->header.next; entry != &map->header; + entry = entry->next) { + if (oldp && (dp - (char *)oldp) < *oldlenp + elem_size) { + error = fill_vmentry(l, p, &vme, map, entry); + if (error) + break; + error = sysctl_copyout(l, &vme, dp, + min(elem_size, sizeof(vme))); + if (error) + break; + dp += elem_size; + } + count++; + } + vm_map_unlock_read(map); + uvmspace_free(vm); +out: + if (pid != -1) + mutex_exit(p->p_lock); + if (error == 0) { + count *= elem_size; + if (oldp != NULL && *oldlenp < count) + error = ENOSPC; + *oldlenp = count; + } + return error; +}