V4L2學習(四)VIVI分析

  vivi 相對於後面要分析的 usb 攝像頭驅動程序,它沒有真正的硬件相關層的操作,也就是說拋開了複雜的 usb 層的相關知識,便於理解 V4L2 驅動框架,側重於驅動和應用的交互。

  前面咱們提到,V4L2 的核心是 v4l2-dev.c 它向上提供統一的文件操作接口 v4l2_fops ,向下提供 video_device 註冊接口 video_register_device ,做爲一個具體的驅動,須要作的工作就是分配、設置、註冊一個 video_device。框架很簡單,複雜的是視頻設備相關衆多的 ioctl。


一、vivi 框架分析

static int __init vivi_init(void) { ret = vivi_create_instance(i); ... return ret; } module_init(vivi_init);

    vivi 分配了一個 video_device 指針,沒有去設置而是直接讓它指向了一個現成的 video_device 結構 vivi_template ,那麼所有的工作都將圍繞 vivi_template 展開。

static int __init vivi_create_instance(int inst) { struct vivi_dev *dev; struct video_device *vfd; struct v4l2_ctrl_handler *hdl; struct vb2_queue *q; // 分配一個 vivi_dev 結構體
    dev = kzalloc(sizeof(*dev), GFP_KERNEL); // v4l2_dev 初始化,並無什麼做用
    ret = v4l2_device_register(NULL, &dev->v4l2_dev); // 設置 dev 的一些參數,好比圖像格式、大小
    dev->fmt = &formats[0]; dev->width = 640; dev->height = 480; dev->pixelsize = dev->fmt->depth / 8; ... // vivi_dev->vb_vidq(vb2_queue) 初始化
    q = &dev->vb_vidq; memset(q, 0, sizeof(dev->vb_vidq)); q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; q->drv_priv = dev; q->buf_struct_size = sizeof(struct vivi_buffer); // vivi_dev->vb_vidq(vb2_queue)->ops
    q->ops     = &vivi_video_qops; // vivi_dev->vb_vidq(vb2_queue)->mem_ops
    q->mem_ops = &vb2_vmalloc_memops; // 初始化一些鎖之類的東西
 vb2_queue_init(q); /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); init_waitqueue_head(&dev->vidq.wq); // 分配一個 video_device ,這纔是重點
    vfd = video_device_alloc(); *vfd = vivi_template; vfd->debug = debug; vfd->v4l2_dev = &dev->v4l2_dev; set_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags); vfd->lock = &dev->mutex; // 註冊 video_device !!!
    ret = video_register_device(vfd, VFL_TYPE_GRABBER, video_nr); // 把 vivi_dev 放入 video_device->dev->p->driver_data ,這個後邊常常用到
 video_set_drvdata(vfd, dev); /* Now that everything is fine, let's add it to device list */ list_add_tail(&dev->vivi_devlist, &vivi_devlist); if (video_nr != -1) video_nr++; // vivi_dev->vfd(video_device) = vfd
    dev->vfd = vfd; v4l2_info(&dev->v4l2_dev, "V4L2 device registered as %s\n", video_device_node_name(vfd)); return 0; }

  用戶空間調用的是 v4l2_fops ,可是最終會調用到 vivi_fops ,vivi_fops 中的 ioctl 調用 video_ioctl2。

static struct video_device vivi_template = { .name = "vivi", .fops = &vivi_fops, .ioctl_ops = &vivi_ioctl_ops, .minor = -1, .release = video_device_release, .tvnorms = V4L2_STD_525_60, .current_norm = V4L2_STD_NTSC_M, };

  video_register_device 過程就不詳細分析了,前面的文章中分析過,大概就是向核心層註冊 video_device 結構體,核心層註冊字符設備並提供一個統一的 fops ,當用戶空間 read write ioctl 等,最終仍是會跳轉到 video_device->fops ,還有一點就是核心層會把咱們註冊進來的 video_device 結構放入一個全局的 video_device 數組。

static const struct v4l2_file_operations vivi_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vivi_close, .read = vivi_read, .poll = vivi_poll, .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ .mmap = vivi_mmap, };

  這裏,先看一下 v4l2_fh_open 函數

int v4l2_fh_open(struct file *filp) {   // 前面註冊時,咱們將 video_device 結構體放入了全局數組 video_device ,如今經過     video_devdata 函數取出來,後面常常用到這種作法
    struct video_device *vdev = video_devdata(filp);     // 分配一個 v4l2_fh 結構,放入file->private_data 中
    struct v4l2_fh *fh = kzalloc(sizeof(*fh), GFP_KERNEL);     filp->private_data = fh;     if (fh == NULL)         return -ENOMEM;     v4l2_fh_init(fh, vdev);     v4l2_fh_add(fh);     return 0; }

  一、咱們隨時能夠經過 video_devdata 取出咱們註冊的 video_device 結構進行操做
  二、咱們隨時能夠經過 file->private_data 取出 v4l2_fh 結構,雖然如今還不知道它有啥用

  下面來分析 ioctl ...首先來看一下調用過程

long video_ioctl2(struct file *file, unsigned int cmd, unsigned long arg) { return video_usercopy(file, cmd, arg, __video_do_ioctl); } static long __video_do_ioctl(struct file *file, unsigned int cmd, void *arg) { struct video_device *vfd = video_devdata(file); const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops; void *fh = file->private_data; struct v4l2_fh *vfh = NULL; int use_fh_prio = 0; long ret = -ENOTTY; if (ops == NULL) { printk(KERN_WARNING "videodev: \"%s\" has no ioctl_ops.\n", vfd->name); return ret; } if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) { vfh = file->private_data; use_fh_prio = test_bit(V4L2_FL_USE_FH_PRIO, &vfd->flags); } if (v4l2_is_known_ioctl(cmd)) { struct v4l2_ioctl_info *info = &v4l2_ioctls[_IOC_NR(cmd)]; if (!test_bit(_IOC_NR(cmd), vfd->valid_ioctls) &&
            !((info->flags & INFO_FL_CTRL) && vfh && vfh->ctrl_handler)) return -ENOTTY; if (use_fh_prio && (info->flags & INFO_FL_PRIO)) { ret = v4l2_prio_check(vfd->prio, vfh->prio); if (ret) return ret; } } if ((vfd->debug & V4L2_DEBUG_IOCTL) &&
                !(vfd->debug & V4L2_DEBUG_IOCTL_ARG)) { v4l_print_ioctl(vfd->name, cmd); printk(KERN_CONT "\n"); } switch (cmd) { /* --- capabilities ------------------------------------------ */
    case VIDIOC_QUERYCAP: { struct v4l2_capability *cap = (struct v4l2_capability *)arg; cap->version = LINUX_VERSION_CODE; ret = ops->vidioc_querycap(file, fh, cap); if (!ret) dbgarg(cmd, "driver=%s, card=%s, bus=%s, "
                    "version=0x%08x, "
                    "capabilities=0x%08x, "
                    "device_caps=0x%08x\n", cap->driver, cap->card, cap->bus_info, cap->version, cap->capabilities, cap->device_caps); break; }

  vivi 驅動就複雜在這些 ioctl 上,下面按照應用層與驅動的交互順序來具體的分析這些 ioctl。

二、ioctl 深刻分析

  應用空間的一個視頻 app 與驅動的交互流程大體以下圖所示:

下面就根據流程,分析每個 ioctl 在 vivi 中的具體實現。把以上的過程吃透,本身寫一個虛擬攝像頭程序應該就不成問題了。

  2.1 VIDIOC_QUERYCAP 查詢設備能力

應用層:

struct v4l2_capability { __u8 driver[16];    /* i.e. "bttv" */ __u8 card[32];    /* i.e. "Hauppauge WinTV" */ __u8 bus_info[32];    /* "PCI:" + pci_name(pci_dev) */ __u32 version;    /* should use KERNEL_VERSION() */ __u32 capabilities; /* Device capabilities */ __u32 reserved[4]; }; struct v4l2_capability cap; ret = ioctl(fd,VIDIOC_QUERYCAP,&cap); if (ret < 0) { LOG("VIDIOC_QUERYCAP failed (%d)\n", ret); return ret; }

驅動層:

void *fh = file->private_data; ops->vidioc_querycap(file, fh, cap); static int vidioc_querycap(struct file *file, void  *priv, struct v4l2_capability *cap) {     struct vivi_fh  *fh  = priv;     struct vivi_dev *dev = fh->dev;     // 這裏只是將一些信息寫回用戶空間而已,很是簡單
    strcpy(cap->driver, "vivi");       strcpy(cap->card, "vivi");     strlcpy(cap->bus_info, dev->v4l2_dev.name, sizeof(cap->bus_info));     cap->version = VIVI_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_READWRITE; return 0; }

通常咱們只關心 capabilities 成員,好比V4L2_CAP_VIDEO_CAPTURE 具備視頻捕獲能力,其它定義以下:

/* Values for 'capabilities' field */
#define V4L2_CAP_VIDEO_CAPTURE        0x00000001  /* Is a video capture device */
#define V4L2_CAP_VIDEO_OUTPUT        0x00000002  /* Is a video output device */
#define V4L2_CAP_VIDEO_OVERLAY        0x00000004  /* Can do video overlay */
#define V4L2_CAP_VBI_CAPTURE        0x00000010  /* Is a raw VBI capture device */
#define V4L2_CAP_VBI_OUTPUT        0x00000020  /* Is a raw VBI output device */
#define V4L2_CAP_SLICED_VBI_CAPTURE    0x00000040  /* Is a sliced VBI capture device */
#define V4L2_CAP_SLICED_VBI_OUTPUT    0x00000080  /* Is a sliced VBI output device */
#define V4L2_CAP_RDS_CAPTURE        0x00000100  /* RDS data capture */
#define V4L2_CAP_VIDEO_OUTPUT_OVERLAY    0x00000200  /* Can do video output overlay */
#define V4L2_CAP_HW_FREQ_SEEK        0x00000400  /* Can do hardware frequency seek  */
#define V4L2_CAP_RDS_OUTPUT        0x00000800  /* Is an RDS encoder */

  2.2 VIDIOC_ENUM_FMT 枚舉(查詢)設備支持的視頻格式

應用層:

struct v4l2_fmtdesc { __u32 index; /* Format number */
    enum v4l2_buf_type  type;              /* buffer type */ __u32 flags; __u8 description[32];   /* Description string */ __u32 pixelformat; /* Format fourcc */ __u32 reserved[4]; }; struct v4l2_fmtdesc fmtdesc; fmtdesc.index=0; fmtdesc.type=V4L2_BUF_TYPE_VIDEO_CAPTURE; while(ioctl(fd,VIDIOC_ENUM_FMT,&fmtdesc)!=-1) { printf("SUPPORT\t%d.%s\n",fmtdesc.index+1,fmtdesc.description); fmtdesc.index++; }

驅動層:

static struct vivi_fmt formats[] = { { .name = "4:2:2, packed, YUYV", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, }, ... } static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv, struct v4l2_fmtdesc *f) { struct vivi_fmt *fmt; if (f->index >= ARRAY_SIZE(formats)) return -EINVAL; fmt = &formats[f->index]; strlcpy(f->description, fmt->name, sizeof(f->description)); f->pixelformat = fmt->fourcc; return 0; }

  通常一個設備支持多種視頻格式,好比 vivi 它所支持的格式存放在 formats 數組中,因爲應用層並不知道設備支持多少種格式,也不知道某種格式具體存放在哪一個數組項中,所以經過index從0開始嘗試,對於驅動層來講就是遍歷全部的數組項,返回每個index對應的視頻格式,好比 V4L2_PIX_FMT_YUYV .

  2.3 VIDIOC_S_FMT 設置視頻格式

應用層:

struct v4l2_format { enum v4l2_buf_type type; union { struct v4l2_pix_format        pix;     /* V4L2_BUF_TYPE_VIDEO_CAPTURE */
        struct v4l2_window        win;     /* V4L2_BUF_TYPE_VIDEO_OVERLAY */
        struct v4l2_vbi_format        vbi;     /* V4L2_BUF_TYPE_VBI_CAPTURE */
        struct v4l2_sliced_vbi_format    sliced;  /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */ __u8 raw_data[200];                   /* user-defined */ } fmt; }; struct v4l2_pix_format { __u32 width; __u32 height; __u32 pixelformat; enum v4l2_field field; __u32 bytesperline; /* for padding, zero if unused */ __u32 sizeimage; enum v4l2_colorspace colorspace; __u32 priv; /* private data, depends on pixelformat */ }; struct v4l2_format fmt; memset(&fmt, 0, sizeof(fmt)); fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;//格式類型
fmt.fmt.pix.width //寬度
fmt.fmt.pix.height //高度
fmt.fmt.pix.pixelformat = VIDEO_FORMAT;//這一項必須是前面查詢出來的某種格式,對應 vivi formats數組
fmt.fmt.pix.field       = V4L2_FIELD_INTERLACED;//隔行掃描
ret = ioctl(fd, VIDIOC_S_FMT, &fmt); if (ret < 0) { LOG("VIDIOC_S_FMT failed (%d)\n", ret); return ret; }

驅動層:

static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivi_dev *dev = video_drvdata(file); struct vb2_queue *q = &dev->vb_vidq; int ret = vidioc_try_fmt_vid_cap(file, priv, f); //if (fmt->fourcc == f->fmt.pix.pixelformat)返回formats[k]
    dev->fmt = get_format(f); dev->pixelsize     = dev->fmt->depth / 8; dev->width         = f->fmt.pix.width; dev->height     = f->fmt.pix.height; dev->field         = f->fmt.pix.field; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivi_dev *dev = video_drvdata(file); struct vivi_fmt *fmt; enum v4l2_field field; fmt = get_format(f); field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) { field = V4L2_FIELD_INTERLACED; } f->fmt.pix.field = field; v4l_bound_align_image(&f->fmt.pix.width, 48, MAX_WIDTH, 2, &f->fmt.pix.height, 32, MAX_HEIGHT, 0, 0); f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; if (fmt->fourcc == V4L2_PIX_FMT_YUYV || fmt->fourcc == V4L2_PIX_FMT_UYVY) f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; else f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; return 0; }

  這裏將應用層傳進來的視頻格式簡單處理後存放進了一個 vivi_dev 結構,vivi_dev 哪裏來的呢?在一開始的時候 vivi_create_instance ,咱們建立了一個 video_device 結構表明咱們的設備,並設置了一個 vivi_dev 做爲 video_device->dev->p->driver_data ,以後 video_register_device ,內核會自動將咱們的 video_device 放入全局數組 video_device[] 中。
  2.4 VIDIOC_G_FMT 得到設置好的視頻格式

應用層:

ret = ioctl(fd, VIDIOC_G_FMT, &fmt); if (ret < 0) { LOG("VIDIOC_G_FMT failed (%d)\n", ret); return ret; } // Print Stream Format
LOG("Stream Format Informations:\n"); LOG(" type: %d\n", fmt.type); LOG(" width: %d\n", fmt.fmt.pix.width); LOG(" height: %d\n", fmt.fmt.pix.height); char fmtstr[8]; memset(fmtstr, 0, 8); memcpy(fmtstr, &fmt.fmt.pix.pixelformat, 4); LOG(" pixelformat: %s\n", fmtstr); LOG(" field: %d\n", fmt.fmt.pix.field); LOG(" bytesperline: %d\n", fmt.fmt.pix.bytesperline); LOG(" sizeimage: %d\n", fmt.fmt.pix.sizeimage); LOG(" colorspace: %d\n", fmt.fmt.pix.colorspace); LOG(" priv: %d\n", fmt.fmt.pix.priv); LOG(" raw_date: %s\n", fmt.fmt.raw_data);

驅動層:

static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct vivi_dev *dev = video_drvdata(file);    // 把記錄在 vivi_dev 中的參數寫回用戶空間
    f->fmt.pix.width        = dev->width; f->fmt.pix.height       = dev->height; f->fmt.pix.field        = dev->field; f->fmt.pix.pixelformat  = dev->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * dev->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; if (dev->fmt->fourcc == V4L2_PIX_FMT_YUYV || dev->fmt->fourcc == V4L2_PIX_FMT_UYVY) f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; else f->fmt.pix.colorspace = V4L2_COLORSPACE_SRGB; return 0; }

  將咱們以前設置的格式返回而已。

  2.5 VIDIOC_REQBUFS 請求在內核空間分配視頻緩衝區

    分配的內存位於內核空間,應用程序沒法直接訪問,須要經過調用mmap內存映射函數,把內核空間的內存映射到用戶空間,應用才能夠用用戶空間地址來訪問內核空間。
應用層:

struct v4l2_requestbuffers { __u32 count; __u32 type; /* enum v4l2_buf_type */ __u32 memory; /* enum v4l2_memory */ __u32 reserved[2]; }; struct v4l2_requestbuffers reqbuf; reqbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; reqbuf.memory = V4L2_MEMORY_MMAP; reqbuf.count = BUFFER_COUNT; ret = ioctl(fd , VIDIOC_REQBUFS, &reqbuf); if(ret < 0) { LOG("VIDIOC_REQBUFS failed (%d)\n", ret); return ret; }

驅動層:

static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct vivi_dev *dev = video_drvdata(file); return vb2_reqbufs(&dev->vb_vidq, p);    //核心層提供的標準函數
}

vb_vidq 是 vivi_dev 的一個成員,前面咱們提到它有兩個 ops ,一個是 ops 另外一個是 mem_ops

static struct vb2_ops vivi_video_qops = { .queue_setup = queue_setup, .buf_init = buffer_init, .buf_prepare = buffer_prepare, .buf_finish = buffer_finish, .buf_cleanup = buffer_cleanup, .buf_queue = buffer_queue, .start_streaming= start_streaming, .stop_streaming = stop_streaming, .wait_prepare = vivi_unlock, .wait_finish = vivi_lock, }; static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct vivi_dev *dev = video_drvdata(file); return vb2_reqbufs(&dev->vb_vidq, p);    //核心層提供的標準函數
} int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) { unsigned int num_buffers, allocated_buffers, num_planes = 0; int ret = 0; // 判斷 re->count 是否小於 VIDEO_MAX_FRAME
    num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME); memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); q->memory = req->memory; //(q)->ops->queue_setup(q,NULL,...)
    ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes, q->plane_sizes, q->alloc_ctx); /* Finally, allocate buffers and video memory */ ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes); allocated_buffers = ret; q->num_buffers = allocated_buffers; req->count = allocated_buffers; return 0; } static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct vivi_dev *dev = vb2_get_drv_priv(vq); unsigned long size; // 每個buffer 的大小
    size = dev->width * dev->height * dev->pixelsize; if (0 == *nbuffers) *nbuffers = 32; // 若是申請的buffer過多,致使空間不夠減小buffer
    while (size * *nbuffers > vid_limit * 1024 * 1024) (*nbuffers)--; *nplanes = 1; // 把總大小放入 vivi_dev->vb_vidq->plane_size[0]
    sizes[0] = size; return 0; } static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory, unsigned int num_buffers, unsigned int num_planes) { unsigned int buffer; struct vb2_buffer *vb; int ret; // 分配多個 vb2_buffer 填充並放入 vivi_dev->vb_vidq->bufs[]
    for (buffer = 0; buffer < num_buffers; ++buffer) { /* Allocate videobuf buffer structures */ vb = kzalloc(q->buf_struct_size, GFP_KERNEL); /* Length stores number of planes for multiplanar buffers */
        if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) vb->v4l2_buf.length = num_planes; vb->state = VB2_BUF_STATE_DEQUEUED; vb->vb2_queue = q; vb->num_planes = num_planes; vb->v4l2_buf.index = q->num_buffers + buffer; vb->v4l2_buf.type = q->type; vb->v4l2_buf.memory = memory; /* Allocate video buffer memory for the MMAP type */
        if (memory == V4L2_MEMORY_MMAP) { ret = __vb2_buf_mem_alloc(vb);//核心提供的標準函數
            ret = call_qop(q, buf_init, vb);//q->ops->buf_init
 } q->bufs[q->num_buffers + buffer] = vb; } __setup_offsets(q, buffer); return buffer; } static int __vb2_buf_mem_alloc(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; void *mem_priv; int plane; /* num_planes == 1 */
    for (plane = 0; plane < vb->num_planes; ++plane) { mem_priv = call_memop(q, alloc, q->alloc_ctx[plane], q->plane_sizes[plane]); /* Associate allocator private data with this plane */ vb->planes[plane].mem_priv = mem_priv; vb->v4l2_planes[plane].length = q->plane_sizes[plane]; } return 0; } static void *vb2_vmalloc_alloc(void *alloc_ctx, unsigned long size) { struct vb2_vmalloc_buf *buf; buf = kzalloc(sizeof(*buf), GFP_KERNEL); buf->size = size; // 分配空間
    buf->vaddr = vmalloc_user(buf->size); buf->handler.refcount = &buf->refcount; buf->handler.put = vb2_vmalloc_put; buf->handler.arg = buf; atomic_inc(&buf->refcount); return buf; }

  2.6 VIDIOC_QUERYBUF 查詢分配好的 buffer 信息
    查詢已經分配好的V4L2視頻緩衝區的相關信息,包括緩衝區的使用狀態、在內核空間的偏移地址、緩衝區長度等,而後應用程序根據這些信息使用mmap把內核空間地址映射到用戶空間。
應用層:

struct v4l2_buffer { __u32 index; enum v4l2_buf_type type; __u32 bytesused; __u32 flags; enum v4l2_field field; struct timeval timestamp; struct v4l2_timecode timecode; __u32 sequence; /* memory location */
    enum v4l2_memory memory; union { __u32 offset; unsigned long userptr; } m; __u32 length; __u32 input; __u32 reserved; }; v4l2_buffer buf; buf.index = i; buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf.memory = V4L2_MEMORY_MMAP; ret = ioctl(fd , VIDIOC_QUERYBUF, &buf); if(ret < 0) { LOG("VIDIOC_QUERYBUF (%d) failed (%d)\n", i, ret); return ret; }

驅動層:

ops->vidioc_querybuf(file, fh, p); static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct vivi_dev *dev = video_drvdata(file); return vb2_querybuf(&dev->vb_vidq, p); } int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b) { struct vb2_buffer *vb; // 取出 buf
    vb = q->bufs[b->index]; // 將 buf 信息寫回用戶空間傳遞的 b
    return __fill_v4l2_buffer(vb, b); } static int __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b) { struct vb2_queue *q = vb->vb2_queue; int ret; /* Copy back data such as timestamp, flags, input, etc. */ memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m)); b->input = vb->v4l2_buf.input; b->reserved = vb->v4l2_buf.reserved; if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) { ret = __verify_planes_array(vb, b); if (ret) return ret; /* * Fill in plane-related data if userspace provided an array * for it. The memory and size is verified above. */ memcpy(b->m.planes, vb->v4l2_planes, b->length * sizeof(struct v4l2_plane)); if (q->memory == V4L2_MEMORY_DMABUF) { unsigned int plane; for (plane = 0; plane < vb->num_planes; ++plane) b->m.planes[plane].m.fd = 0; } } else { /* * We use length and offset in v4l2_planes array even for * single-planar buffers, but userspace does not. */ b->length = vb->v4l2_planes[0].length; b->bytesused = vb->v4l2_planes[0].bytesused; if (q->memory == V4L2_MEMORY_MMAP) b->m.offset = vb->v4l2_planes[0].m.mem_offset; else if (q->memory == V4L2_MEMORY_USERPTR) b->m.userptr = vb->v4l2_planes[0].m.userptr; else if (q->memory == V4L2_MEMORY_DMABUF) b->m.fd = 0; } /* * Clear any buffer state related flags. */ b->flags &= ~V4L2_BUFFER_STATE_FLAGS; switch (vb->state) { case VB2_BUF_STATE_QUEUED: case VB2_BUF_STATE_ACTIVE: b->flags |= V4L2_BUF_FLAG_QUEUED; break; case VB2_BUF_STATE_ERROR: b->flags |= V4L2_BUF_FLAG_ERROR; /* fall through */
    case VB2_BUF_STATE_DONE: b->flags |= V4L2_BUF_FLAG_DONE; break; case VB2_BUF_STATE_PREPARED: b->flags |= V4L2_BUF_FLAG_PREPARED; break; case VB2_BUF_STATE_DEQUEUED: /* nothing */
        break; } if (__buffer_in_use(q, vb)) b->flags |= V4L2_BUF_FLAG_MAPPED; return 0; }

2.7 mmap

 應用層:

v4l2_buffer framebuf[] framebuf[i].length = buf.length; framebuf[i].start = (char *) mmap( NULL, // 欲指向內存的起始地址,通常爲NULL,表示系統自動分配
    buf.length,    //映射長度
    PROT_READ|PROT_WRITE,     //可讀可寫
    MAP_SHARED,     //對映射區的讀寫會寫回內核空間,並且容許其它映射該內核空間地址的進程共享
 fd, buf.m.offset ); if (framebuf[i].start == MAP_FAILED) { LOG("mmap (%d) failed: %s\n", i, strerror(errno)); return -1; }

驅動層:

static int vivi_mmap(struct file *file, struct vm_area_struct *vma) { struct vivi_dev *dev = video_drvdata(file); int ret; ret = vb2_mmap(&dev->vb_vidq, vma);//核心層提供的函數
    return ret; }

  2.8 VIDIOC_QBUF 
  投放一個空的視頻緩衝區到視頻緩衝區輸入隊列,執行成功後,在啓動視頻設備拍攝圖像時,相應的視頻數據被保存到視頻輸入隊列相應的視頻緩衝區中。
應用層:

ret = ioctl(fd , VIDIOC_QBUF, &buf); if (ret < 0) { LOG("VIDIOC_QBUF (%d) failed (%d)\n", i, ret); return -1; }

驅動層:

static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct vivi_dev *dev = video_drvdata(file); return vb2_qbuf(&dev->vb_vidq, p); } int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b) { struct rw_semaphore *mmap_sem = NULL; struct vb2_buffer *vb; int ret = 0; vb = q->bufs[b->index]; switch (vb->state) { case VB2_BUF_STATE_DEQUEUED: ret = __buf_prepare(vb, b); } // 將這個 buffer 掛入 q->queued_list
    list_add_tail(&vb->queued_entry, &q->queued_list); vb->state = VB2_BUF_STATE_QUEUED; if (q->streaming) __enqueue_in_driver(vb); /* Fill buffer information for the userspace */ __fill_v4l2_buffer(vb, b); unlock: if (mmap_sem) up_read(mmap_sem); return ret; }

  實質上就是取出一個 vb2_buffer 掛入 vivi_dev->vb_vidq->queued_list
  2.9 VIDIOC_STREAMON
應用層:

enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = ioctl(fd, VIDIOC_STREAMON, &type); if (ret < 0) { LOG("VIDIOC_STREAMON failed (%d)\n", ret); return ret; }

驅動層:

static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct vivi_dev *dev = video_drvdata(file); return vb2_streamon(&dev->vb_vidq, i); } int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type) { struct vb2_buffer *vb; int ret; // 在 queued_list 鏈表中取出每個 buffer 調用buffer queue,對於vivi來說就是放入 vidq->active 鏈表
    list_for_each_entry(vb, &q->queued_list, queued_entry) __enqueue_in_driver(vb); ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count)); q->streaming = 1; return 0; } static void __enqueue_in_driver(struct vb2_buffer *vb) { struct vb2_queue *q = vb->vb2_queue; vb->state = VB2_BUF_STATE_ACTIVE; /* sync buffers */
    for (plane = 0; plane < vb->num_planes; ++plane) call_memop(q, prepare, vb->planes[plane].mem_priv); q->ops->buf_queue(vb);// list_add_tail(&buf->list, &vidq->active); 
} static int start_streaming(struct vb2_queue *vq, unsigned int count) { struct vivi_dev *dev = vb2_get_drv_priv(vq); dprintk(dev, 1, "%s\n", __func__); return vivi_start_generating(dev); } static int vivi_start_generating(struct vivi_dev *dev) { struct vivi_dmaqueue *dma_q = &dev->vidq; /* Resets frame counters */ dev->ms = 0; dev->mv_count = 0; dev->jiffies = jiffies; dma_q->frame = 0; dma_q->ini_jiffies = jiffies; // 建立一個內核線程,入口函數 vivi_thread
    dma_q->kthread = kthread_run(vivi_thread, dev, dev->v4l2_dev.name); /* Wakes thread */ wake_up_interruptible(&dma_q->wq); return 0; } static int vivi_thread(void *data) { struct vivi_dev *dev = data; dprintk(dev, 1, "thread started\n"); set_freezable(); for (;;) { vivi_sleep(dev); if (kthread_should_stop()) break; } dprintk(dev, 1, "thread: exit\n"); return 0; } static void vivi_sleep(struct vivi_dev *dev) { struct vivi_dmaqueue *dma_q = &dev->vidq; int timeout; DECLARE_WAITQUEUE(wait, current); add_wait_queue(&dma_q->wq, &wait); if (kthread_should_stop()) goto stop_task; /* Calculate time to wake up */ timeout = msecs_to_jiffies(frames_to_ms(1)); vivi_thread_tick(dev); schedule_timeout_interruptible(timeout); stop_task: remove_wait_queue(&dma_q->wq, &wait); try_to_freeze(); }

  每次調用 vivi_sleep 這個線程都被掛入等待隊列,調用 vivi_thread_tick 填充數據,而後休眠指定的時間自動喚醒,一直循環下去。這樣就生成了一幀一幀的視頻數據。

static void vivi_thread_tick(struct vivi_dev *dev) { struct vivi_dmaqueue *dma_q = &dev->vidq; struct vivi_buffer *buf; unsigned long flags = 0; spin_lock_irqsave(&dev->slock, flags); buf = list_entry(dma_q->active.next, struct vivi_buffer, list); list_del(&buf->list); spin_unlock_irqrestore(&dev->slock, flags); do_gettimeofday(&buf->vb.v4l2_buf.timestamp); /* 填充Buffer */ vivi_fillbuff(dev, buf); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE); } void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) { struct vb2_queue *q = vb->vb2_queue; unsigned long flags; unsigned int plane; /* sync buffers */
    for (plane = 0; plane < vb->num_planes; ++plane) call_memop(q, finish, vb->planes[plane].mem_priv); /* Add the buffer to the done buffers list */ spin_lock_irqsave(&q->done_lock, flags); vb->state = state; list_add_tail(&vb->done_entry, &q->done_list); atomic_dec(&q->queued_count); #ifdef CONFIG_SYNC sw_sync_timeline_inc(q->timeline, 1); #endif spin_unlock_irqrestore(&q->done_lock, flags); /* 應用程序select 時 poll_wait 裏休眠,如今有數據了喚醒 */ wake_up(&q->done_wq); }

  開始的時候咱們將以一個 vb_buffer 掛入 vb_vidq->queued_list ,當啓動視頻傳輸以後,它被取出掛入 vb_vidq->vidq->active 隊列,而後在內核線程中每個 tick ,又將它取出填充視頻數據以後,再掛入 vb_vidq->done_list ,喚醒正在休眠等待視頻數據的應用程序。
  2.10 select
驅動層:

vivi_poll(struct file *file, struct poll_table_struct *wait) { struct vivi_dev *dev = video_drvdata(file); struct vb2_queue *q = &dev->vb_vidq; return vb2_poll(q, file, wait); }
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait) { // 掛入休眠隊列,是否休眠還要看返回值,大概沒有數據就休眠,有數據就不休眠
    poll_wait(file, &q->done_wq, wait); if (!list_empty(&q->done_list)) vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry); spin_unlock_irqrestore(&q->done_lock, flags); if (vb && (vb->state == VB2_BUF_STATE_DONE || vb->state == VB2_BUF_STATE_ERROR)) { return (V4L2_TYPE_IS_OUTPUT(q->type)) ? res | POLLOUT | POLLWRNORM : res | POLLIN | POLLRDNORM; } return res; }

  喚醒以後,咱們就能夠去從視頻輸出隊列中取出buffer,而後根據映射關係,在應用空間取出視頻數據了
  2.11 VIDIOC_DQBUF
應用層:

ret = ioctl(fd, VIDIOC_DQBUF, &buf); if (ret < 0) { LOG("VIDIOC_DQBUF failed (%d)\n", ret); return ret; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct vivi_dev *dev = video_drvdata(file); return vb2_dqbuf(&dev->vb_vidq, p, file->f_flags & O_NONBLOCK); } int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking) { struct vb2_buffer *vb = NULL; int ret; // 等待在 q->done_list 取出第一個可用的 buffer
    ret = __vb2_get_done_vb(q, &vb, nonblocking); ret = call_qop(q, buf_finish, vb); /* 寫回buffer的信息到用戶空間,應用程序找個這個buffer的mmap以後的地址讀數據 */ __fill_v4l2_buffer(vb, b); /* Remove from videobuf queue */ list_del(&vb->queued_entry); vb->state = VB2_BUF_STATE_DEQUEUED; return 0; } static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,int nonblocking) {     unsigned long flags;     int ret;     /*     * Wait for at least one buffer to become available on the done_list.     */     ret = __vb2_wait_for_done_vb(q, nonblocking);     spin_lock_irqsave(&q->done_lock, flags);     *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);     list_del(&(*vb)->done_entry);     spin_unlock_irqrestore(&q->done_lock, flags);     return 0; } static int buffer_finish(struct vb2_buffer *vb) { struct vivi_dev *dev = vb2_get_drv_priv(vb->vb2_queue); dprintk(dev, 1, "%s\n", __func__); return 0; }