Re: [PATCH v3] kernel: add kcov code coverage
From: Kirill A. Shutemov
Date: Mon Jan 18 2016 - 07:42:18 EST
On Thu, Jan 14, 2016 at 03:22:21PM +0100, Dmitry Vyukov wrote:
> +/*
> + * Entry point from instrumented code.
> + * This is called once per basic-block/edge.
> + */
> +void __sanitizer_cov_trace_pc(void)
> +{
> + struct task_struct *t;
> + enum kcov_mode mode;
> +
> + t = current;
> + /*
> + * We are interested in code coverage as a function of a syscall inputs,
> + * so we ignore code executed in interrupts.
> + */
> + if (!t || in_interrupt())
> + return;
> + mode = READ_ONCE(t->kcov_mode);
> + if (mode == kcov_mode_trace) {
> + u32 *area;
> + u32 pos;
> +
> + /*
> + * There is some code that runs in interrupts but for which
> + * in_interrupt() returns false (e.g. preempt_schedule_irq()).
> + * READ_ONCE()/barrier() effectively provides load-acquire wrt
> + * interrupts, there are paired barrier()/WRITE_ONCE() in
> + * kcov_ioctl_locked().
> + */
> + barrier();
> + area = t->kcov_area;
> + /* The first u32 is number of subsequent PCs. */
> + pos = READ_ONCE(area[0]) + 1;
> + if (likely(pos < t->kcov_size)) {
> + area[pos] = (u32)_RET_IP_;
If area[0] is UINT32_MAX, pos wraps around to 0, so the condition will be
true and you'll temporarily set area[0] to (u32)_RET_IP_. That may be fun. :)
> + WRITE_ONCE(area[0], pos);
> + }
> + }
> +}
> +EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
...
> +static int kcov_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
> +{
> + struct kcov *kcov;
> + unsigned long off;
> + struct page *page;
> +
> + /* Map the preallocated kcov->area. */
> + kcov = vma->vm_file->private_data;
> + off = vmf->pgoff << PAGE_SHIFT;
> + if (off >= kcov->size * sizeof(u32))
> + return VM_FAULT_SIGSEGV;
This should be VM_FAULT_SIGBUS: access beyond the end of the backing object is SIGBUS, not SIGSEGV.
> +
> + page = vmalloc_to_page(kcov->area + off);
> + get_page(page);
> + vmf->page = page;
> + return 0;
> +}
BTW, since all pages are pre-allocated, you don't really need a fault handler --
just set up all the page table entries during mmap. This would shorten the
warm-up period for userspace. See vm_insert_page().
> +
> +static void kcov_unmap(struct vm_area_struct *vma)
> +{
> + kcov_put(vma->vm_file->private_data);
> +}
> +
> +static void kcov_map_copied(struct vm_area_struct *vma)
> +{
> + struct kcov *kcov;
> +
> + kcov = vma->vm_file->private_data;
> + kcov_get(kcov);
> +}
> +
> +static const struct vm_operations_struct kcov_vm_ops = {
> + .fault = kcov_vm_fault,
> + .close = kcov_unmap,
> + /* Called on fork()/clone() when the mapping is copied. */
> + .open = kcov_map_copied,
> +};
> +
> +static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
> +{
> + int res = 0;
> + void *area;
> + struct kcov *kcov = vma->vm_file->private_data;
> +
> + area = vmalloc_user(vma->vm_end - vma->vm_start);
> + if (!area)
> + return -ENOMEM;
> +
> + spin_lock(&kcov->lock);
> + if (kcov->mode == 0 || vma->vm_pgoff != 0 ||
> + vma->vm_end - vma->vm_start != kcov->size * sizeof(u32)) {
You need VM_DONTEXPAND to keep this size invariant (otherwise mremap() could grow the mapping past the allocated area). Not sure about VM_DONTDUMP.
> + res = -EINVAL;
> + goto exit;
> + }
> + if (!kcov->area) {
> + kcov->area = area;
> + area = NULL;
> + }
> + /*
> + * The file drops a reference on close, but the file
> + * descriptor can be closed with the mmaping still alive so we keep
> + * a reference for those. This is put in kcov_unmap().
> + */
> + kcov_get(kcov);
> + vma->vm_ops = &kcov_vm_ops;
> +exit:
> + spin_unlock(&kcov->lock);
> + vfree(area);
> + return res;
> +}
--
Kirill A. Shutemov