[2.4.20][embeded][arm] memory management issue in a kernel modulei'm writing

From: JLM aka cityhunter
Date: Mon Oct 18 2004 - 16:20:06 EST


hello everybody, nice to see you
I'm working on an embeded solution on arm 920, which use an ov9640 camera
the driver I got originaly was a montavista one under GPL, although there is not much left of the original (buggy) code
the camera hasn't internal memory and since we use official kernel v4l2api we want to provide a buffering mechanism

I saw that the only way to do that is to use the no_page method of vma
is this the only way to remap memory obtained through __get_dma_page()?

when I allocate the buffer I reserve the memory to prevent swapping using SetPageReserved()
(I know this isn't be right way but if I use LockPage() I get one of the issue below)
I perform my no_page work, evrything is fine, the perfs are here (60fps on a raw frame and 30 over a [userspace] rotated one) that's cool
on close of the vma, I preform the step in reverse order : delock the memory using ClearPageReserved() and free the pages

I putted some debug statement and the steps are strictly symetrical, the flags set are cleared.... so from my point of view everything is ok
BUT launching my test application and doing a free on the shell several time shows me that there is a memory leak.... so my buffers aren't desallocated..... (or seems so)

so I tried to use LockPage() instead of SetPageReserved() and now I get an even stranger issue : when I launch the 1rst test everything is ok, no frame corruption.... when I launch the second time the test that's the big mess.... activating the debug I see that everything goes right but my wake_up_interruptible () call don't wake up my application. and when I kill it I get a kernel panic....... (aie killing interrupt handler but the backtrace show me that this is in my desalocation function)

I included a part of my source here the function to look for are
mmap_vma_open
mmap_vma_close
mmap_vma_nopage
DeallocateBuffer
AllocateBuffer
and BuffMemReserve


if this is an already known issue just tell me, I'll report it to my leader and see what we can do
if you have any ideas/test just ask
we recently got a BDI2000, I'll see if I can manage to debug with it......
any help is wlecome, I know that the 2.4.* series are outdated but I'm not making the choices ^_^
Best regards

/*
* File: v4l2.c
*
* Description:
* Video for Linux Two Interface for OMAP Capture device.
*
* This driver was highly leveraged from Bill Dirk's
* v4l2 example generic driver. Modified by RidgeRun
* to work with the omap1509 camera module.
*
* Heavily modified again by MontaVista Software to work with
* OMAP 1510/1610/H2 Innovator platforms.
*
* Created 2002, Copyright (C) 2002 RidgeRun, Inc. All rights reserved.
* Created 2002, Copyright (C) 2002 Texas Instruments All rights reserved.
* Copyright (C) 2003-2004 MontaVista Software, Inc. All rights reserved.
*
* Conceptual Usage: (exact syntax may vary)
* 1. insmod videodevX
* 2. insmod camera [unit_video=0]
* driver registers major 81, minor 0, which
* on most systems equates to either /dev/video0
* or sometimes simply /dev/video
* 3. Now you can run apps that use the v4l2 interface
* of the installed camera driver. (see apps/v4l2/<>.c)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Original Author: Bill Dirks <bdirks@xxxxxxxxxxx>
* based on code by Alan Cox, <alan@xxxxxxxxx>
*/
#include <linux/config.h> /* retrieve the CONFIG_* macros */
#include <linux/module.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/videodev.h>
#include <linux/wrapper.h>
#include <asm-arm/unistd.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#define MODULE_NAME "v4l2-omap"
#include "common.h"
#include "camif.h"
#include "v4l2.h"

#define STATIC


//#define dbg(fmt, args...) printk(KERN_DEBUG "%s: " fmt, __func__, ## args)
#define dbg(fmt, args...)
#define ENTRY(x...) dbg("enter\n");
#define EXIT(x...) dbg("exit\n");


static struct tq_struct fbinfo_tsk_q_entry;
static void update_fbinfo_task (void *);

extern long sys_ioctl (unsigned int fd, unsigned int cmd, unsigned long arg);

#define DEFAULT_FRAME_BUFF "/dev/fb0"

/*
* Supported pixel formats. All the underlying cameras must support
* these pixel formats. If the camera doesn't support a pixel format
* in hardware, it will program the camera for the closest supported
* format and then use its convert_image() method.
*/
static struct v4l2_fmtdesc capfmt[] = {
{0, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"RGB-16 (5-5-5)"}, V4L2_PIX_FMT_RGB555,
},
{1, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"RGB-16 (5-6-5)"}, V4L2_PIX_FMT_RGB565,
},
{2, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"RGB-24 (B-G-R)"}, V4L2_PIX_FMT_BGR24,
},
{3, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"YUV 4:2:2 (Y-U-Y-V)"},
V4L2_PIX_FMT_YUYV,
},
#if 0
{4, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"RGB-32 (B-G-R-?)"},
V4L2_PIX_FMT_BGR32,
},
{5, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"Greyscale-8"}, V4L2_PIX_FMT_GREY,
},
{6, V4L2_BUF_TYPE_VIDEO_CAPTURE, 0, {"YUV 4:2:0 (planar)"},
V4L2_PIX_FMT_YUV420,
},
#endif
};

#define NUM_CAPFMT (sizeof(capfmt)/sizeof(capfmt[0]))
#define MAX_BPP 3 /* max bytes per pixel (BGR24) */

/*
* Array of image formats supported by the various cameras used on
* OMAP. These must be ordered from smallest image size to largest.
* The specific camera will support all or a subset of these.
*/
const struct image_size omap_image_size[] = {
{88, 72}, /* QQCIF */
{160, 120}, /* QQVGA */
{176, 144}, /* QCIF */
{320, 240}, /* QVGA */
{352, 288}, /* CIF */
{640, 480}, /* VGA */
{1280, 960}, /* SXGA */
};

#define NUM_IMAGE_SIZE (sizeof(omap_image_size)/sizeof(omap_image_size[0]))

/*
* Array of pixel formats supported by the various cameras used on
* OMAP. The camera uses its convert_image() method to convert from
* a native pixel format to one of the above capfmt[] formats.
*/
const int omap_pixfmt_depth[] = {
16, /* YUV */
16, /* RGB565 */
15 /* RGB555 */
};

/* Extreme video dimensions */
#define MIN_WIDTH 32
#define MIN_HEIGHT 24
#define MAX_WIDTH (omap_image_size[NUM_IMAGE_SIZE-1].width)
#define MAX_HEIGHT (omap_image_size[NUM_IMAGE_SIZE-1].height)
#define MAX_IMAGE_SIZE (MAX_WIDTH * MAX_HEIGHT * MAX_BPP)

#define MAX_FRAME_AGE 200 /* ms */

#undef dbg
#define dbg(x...)



/*
* The Capture device structure array. This is the only global
* variable in the module besides those used by the device probing
* and enumeration routines (command line overrides)
*/

//static int unit_video = 0;
//MODULE_PARM(unit_video, "1-"__MODULE_STRING(NBOARDS)"i");


STATIC int
get_framebuffer_info (struct capture_device *dev)
{
int fbfd, retcode;

ENTRY ();
dev->fbinfo_valid = 0;
fbfd = sys_open (DEFAULT_FRAME_BUFF, O_RDWR, 0);
if (fbfd < 0)
{
err ("Error: cannot open framebuffer device.\n");
return fbfd;
}
// Get fixed screen information
if ((retcode = sys_ioctl (fbfd, FBIOGET_FSCREENINFO,
(unsigned long) (&dev->fbfix))))
{
err ("Error reading fb fixed information.\n");
return retcode;
}
// Get variable screen information
if ((retcode = sys_ioctl (fbfd, FBIOGET_VSCREENINFO,
(unsigned long) (&dev->fbvar))))
{
err ("Error reading fb var information.\n");
return retcode;
}

sys_close (fbfd);
dev->fbinfo_valid = 1;

return 0;
}



STATIC int
isqrt (unsigned int q)
{ /* A little integer square root routine */
int i;
int r;
unsigned int b2 = 0x40000000;
unsigned int t;

ENTRY ();
for (i = 16, r = 0; i > 0 && q; --i)
{
t = ((unsigned int) r << i) + b2;
if (t <= q)
{
q -= t;
r |= (1 << (i - 1));
}
b2 >>= 2;
}
return r;
}

STATIC unsigned long
current_time_ms (void)
{
struct timeval now;

ENTRY ();
do_gettimeofday (&now);
return now.tv_sec * 1000 + now.tv_usec / 1000;
}


/*
* Simple queue management taken from v4l2 old helping api
*/
static rwlock_t rw_lock_unlocked = RW_LOCK_UNLOCKED;
void
v4l2_q_init (struct v4l2_queue *q)
{
if (q == NULL)
return;
q->qlock = rw_lock_unlocked;
q->forw = (struct v4l2_q_node *) q;
q->back = (struct v4l2_q_node *) q;
}

void
v4l2_q_add_head (struct v4l2_queue *q, struct v4l2_q_node *node)
{
unsigned long flags;
if (q == NULL || node == NULL)
return;
if (q->forw == NULL || q->back == NULL)
v4l2_q_init (q);
write_lock_irqsave (&(q->qlock), flags);
node->forw = q->forw;
node->back = (struct v4l2_q_node *) q;
q->forw->back = node;
q->forw = node;
write_unlock_irqrestore (&(q->qlock), flags);
}

void
v4l2_q_add_tail (struct v4l2_queue *q, struct v4l2_q_node *node)
{
unsigned long flags;
if (q == NULL || node == NULL)
return;
if (q->forw == NULL || q->back == NULL)
v4l2_q_init (q);
write_lock_irqsave (&(q->qlock), flags);
node->forw = (struct v4l2_q_node *) q;
node->back = q->back;
q->back->forw = node;
q->back = node;
write_unlock_irqrestore (&(q->qlock), flags);
}

void *
v4l2_q_del_head (struct v4l2_queue *q)
{
unsigned long flags;
struct v4l2_q_node *node;
if (q == NULL)
return NULL;
write_lock_irqsave (&(q->qlock), flags);
if (q->forw == NULL || q->back == NULL ||
q->forw == (struct v4l2_q_node *) q ||
q->back == (struct v4l2_q_node *) q)
{
write_unlock_irqrestore (&(q->qlock), flags);
return NULL;
}
node = q->forw;
node->forw->back = (struct v4l2_q_node *) q;
q->forw = node->forw;
node->forw = NULL;
node->back = NULL;
write_unlock_irqrestore (&(q->qlock), flags);
return node;
}

void *
v4l2_q_del_tail (struct v4l2_queue *q)
{
unsigned long flags;
struct v4l2_q_node *node;
if (q == NULL)
return NULL;
write_lock_irqsave (&(q->qlock), flags);
if (q->forw == NULL || q->back == NULL ||
q->forw == (struct v4l2_q_node *) q ||
q->back == (struct v4l2_q_node *) q)
{
write_unlock_irqrestore (&(q->qlock), flags);
return NULL;
}
node = q->back;
node->back->forw = (struct v4l2_q_node *) q;
q->back = node->back;
node->forw = NULL;
node->back = NULL;
write_unlock_irqrestore (&(q->qlock), flags);
return node;
}

void *
v4l2_q_peek_head (struct v4l2_queue *q)
{
unsigned long flags;
struct v4l2_q_node *node;
read_lock_irqsave (&(q->qlock), flags);
if (q == NULL || q->forw == NULL || q->forw == (struct v4l2_q_node *) q)
{
read_unlock_irqrestore (&(q->qlock), flags);
return NULL;
}
node = q->forw;
read_unlock_irqrestore (&(q->qlock), flags);
return node;
}

void *
v4l2_q_peek_tail (struct v4l2_queue *q)
{
unsigned long flags;
struct v4l2_q_node *node;
read_lock_irqsave (&(q->qlock), flags);
if (q == NULL || q->back == NULL || q->back == (struct v4l2_q_node *) q)
{
read_unlock_irqrestore (&(q->qlock), flags);
return NULL;
}
node = q->back;
read_unlock_irqrestore (&(q->qlock), flags);
return node;
}

void *
v4l2_q_yank_node (struct v4l2_queue *q, struct v4l2_q_node *node)
{
unsigned long flags;
struct v4l2_q_node *t;
if (v4l2_q_peek_head (q) == NULL || node == NULL)
return NULL;
write_lock_irqsave (&(q->qlock), flags);
for (t = q->forw; t != (struct v4l2_q_node *) q; t = t->forw)
{
if (t == node)
{
node->back->forw = node->forw;
node->forw->back = node->back;
node->forw = NULL;
node->back = NULL;
write_unlock_irqrestore (&(q->qlock), flags);
return node;
}
}
write_unlock_irqrestore (&(q->qlock), flags);
return NULL;
}

int
v4l2_q_last (struct v4l2_queue *q)
{
/* This function by Olivier Carmona */

unsigned long flags;
read_lock_irqsave (&(q->qlock), flags);
if (q == NULL)
{
read_unlock_irqrestore (&(q->qlock), flags);
return -1;
}
if (q->forw == NULL || q->back == NULL ||
q->forw == (struct v4l2_q_node *) q ||
q->back == (struct v4l2_q_node *) q)
{
read_unlock_irqrestore (&(q->qlock), flags);
return -1;
}
if (q->forw == q->back)
{
read_unlock_irqrestore (&(q->qlock), flags);
return 1;
}
read_unlock_irqrestore (&(q->qlock), flags);
return 0;
}

//end of snapshot old helping api

/*
*
* V I D E O D E C O D E R S
*
*/



STATIC int
decoder_set_input (struct capture_device *dev, int i)
{
ENTRY ();
dev->input = i;
//dev->videc.decoder_is_stable = 0;
/* TODO: Switch the hardware to the new input */
return 1;
}

STATIC int
decoder_set_frame_period (struct capture_device *dev, struct v4l2_fract fract)
{
int retcode = -EINVAL;
ENTRY ();
if (dev->camif->set_frame_period)
retcode = dev->camif->set_frame_period (&fract);
else
retcode = -ENOEXEC;

if (retcode == 0)
{
dev->videc.frame_fract=fract;
return 0;
}


dev->videc.frame_fract.numerator = 1;
dev->videc.frame_fract.denominator = 30;
return retcode;
}

STATIC int
decoder_set_standard (struct capture_device *dev, int x)
{
dev->videc.standard = x;
struct v4l2_fract fract={1,30};
ENTRY ();
switch (x)
{
case V4L2_STD_NTSC:
fract.denominator = 30;
break;
case V4L2_STD_PAL:
case V4L2_STD_SECAM:
fract.denominator = 25;
break;
}

return decoder_set_frame_period (dev, fract);
}

STATIC int
decoder_set_vcrmode (struct capture_device *dev, int x)
{
ENTRY ();
dev->source[dev->input].vcrmode = x;
/* TODO: Switch decoder to VCR sync timing mode */
return 1;
}

STATIC int
decoder_is_stable (struct capture_device *dev)
{
ENTRY ();
/* TODO: Check if decoder is synced to input */
return 1;
}


STATIC int
decoder_init (struct capture_device *dev)
{
ENTRY ();
decoder_set_frame_period(dev,dev->videc.frame_fract);
decoder_set_input(dev,dev->input);
return 0;
}


STATIC int
decoder_probe (struct capture_device *dev)
{
ENTRY ();
/* TODO: Probe I2C bus or whatever for the video decoder */
//dbg("Found decoder chip\n");
return 1; /* Found */
}


/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*
* Probe I2C bus for video decoder and fill in the device fields
*/
STATIC int
find_decoder (struct capture_device *dev)
{
ENTRY ();
if (!decoder_probe (dev))
return 0; /* Failure */
return 1;
}

STATIC void
set_video_input (struct capture_device *dev, int i)
{
ENTRY ();
if (i < 0 || i >= dev->videc.num_inputs)
return;
dev->videc.set_input (dev, i);
dev->videc.set_vcrmode (dev, dev->source[i].vcrmode);
}



/*
*
* V I D E O C A P T U R E F U N C T I O N S
*
*/


/* Stop the music!
*/
STATIC void
capture_abort (struct capture_device *dev)
{
ENTRY ();

/* Turn off the capture hardware */
dev->camif->abort ();
dev->capture_started = 0;
dev->streaming = 0;


while (NULL != v4l2_q_del_head (&dev->stream_q_done));
while (NULL != v4l2_q_del_head (&dev->stream_q_capture));
/*
* Wake up any processes that might be waiting for a frame
* and let them return an error
*/
wake_up_interruptible (&dev->new_video_frame);

EXIT ();
}


STATIC void
validate_format (struct v4l2_pix_format *fmt)
{
int n;

ENTRY ();
switch (fmt->pixelformat)
{
case V4L2_PIX_FMT_GREY:
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_RGB565:
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
case V4L2_PIX_FMT_BGR24:
case V4L2_PIX_FMT_BGR32:
break;
default:
dbg ("unknown format %4.4s\n", (char *) &fmt->pixelformat);
fmt->pixelformat = V4L2_PIX_FMT_YUYV;
fmt->field = V4L2_FIELD_NONE;
break;
}
//find nearest size
dbg ("requested format WxH : %dx%d\n", fmt->width, fmt->height);
n = NUM_IMAGE_SIZE - 1;
if (fmt->width > MAX_WIDTH)
fmt->width = MAX_WIDTH;
while (n > 0 && omap_image_size[n].width > fmt->width)
{
//dbg("n :%d, %d>%d\n",n,omap_image_size[n].width,fmt->width);
n--;
}
fmt->width = omap_image_size[n].width;
fmt->height = omap_image_size[n].height;
dbg ("granted format WxH : %dx%d\n", fmt->width, fmt->height);
EXIT ();
}

/* The image format has changed, width, height, pixel format.
* Decide if the format is ok or take the closest valid format.
*/
STATIC int
capture_new_format (struct capture_device *dev)
{

int max_height;
int max_width;
int retcode = 0;

ENTRY ();

dev->ready_to_capture = 0;

max_width = MAX_WIDTH;
max_height = MAX_HEIGHT;

validate_format (&(dev->clientfmt));

// desired default.
dev->clientfmt.field = V4L2_FIELD_TOP;

switch (dev->clientfmt.pixelformat)
{
case V4L2_PIX_FMT_GREY:
dev->depth = 8;
dev->capture_bypp = 1;
break;
case V4L2_PIX_FMT_YUV420:
dev->depth = 12;
dev->capture_bypp = 2;
break;
case V4L2_PIX_FMT_RGB555:
case V4L2_PIX_FMT_RGB565:
dev->clientfmt.field = V4L2_FIELD_NONE;
// fall thru
case V4L2_PIX_FMT_YUYV:
case V4L2_PIX_FMT_UYVY:
dev->depth = 16;
dev->capture_bypp = 2;
break;
case V4L2_PIX_FMT_BGR24:
dev->depth = 24;
dev->capture_bypp = 3;
dev->clientfmt.field = V4L2_FIELD_NONE;
break;
case V4L2_PIX_FMT_BGR32:
dev->depth = 32;
dev->capture_bypp = 4;
dev->clientfmt.field = V4L2_FIELD_NONE;
break;
default:
dbg ("unknown format %4.4s\n", (char *) &dev->clientfmt.pixelformat);
dev->depth = 16;
dev->capture_bypp = 2;
dev->clientfmt.pixelformat = V4L2_PIX_FMT_YUYV;
dev->clientfmt.field = V4L2_FIELD_NONE;
break;
}


dbg ("after max WxH : %dx%d\n", dev->clientfmt.width,
dev->clientfmt.height);

// tell the camera about the format, it may modify width
// and height.
if (dev->camera)
{
if ((retcode = dev->camera->set_format (&dev->clientfmt)))
{
EXIT ();
return retcode;
}
}

dbg ("after camera WxH : %dx%d\n", dev->clientfmt.width,
dev->clientfmt.height);
dev->clientfmt.sizeimage =
(dev->clientfmt.width * dev->clientfmt.height * dev->depth) / 8;

dev->read_buffer.vidbuf.length =
dev->clientfmt.width * dev->clientfmt.height * dev->capture_bypp;

EXIT ();
return 0;
}

STATIC void
BuffMemReserve (struct scatter_node *buff_mem, int lock)
{
struct scatter_node *curr=buff_mem;
int j;
void *address;
struct page *page;
ENTRY();
if (curr == NULL )
{
emerg("nothing to unlock\n");
}
else
{
while (curr != NULL)
{
for (j = (1 << get_order (curr->len)) - 1; j >= 0; j--)
{
address = (void *) (curr->addr + j * PAGE_SIZE);
if (address != NULL)
{
page = virt_to_page ((void *) (address));
if (page != NULL)
{
if ( lock )
//~ LockPage(page);
SetPageReserved (page);
else
//~ UnlockPage(page);
ClearPageReserved (page);
#warning
set_page_count(page,0);
emerg(" page %p count : %d flags %X \t @ %p @p %p\n",
page, page->count, page->flags,
address, page_address(page));
}
else
{
emerg("page == NULL \n");
}
}
else
{
emerg("adress ==NULL\n");
}
}
curr = curr->next;
}
}
EXIT();
}
// **************************
// Routine:
// Description:
// **************************
STATIC void
DeallocateBuffer (struct stream_buffer *buf)
{
struct scatter_node *curr;

ENTRY ();
if (buf == NULL)
return;
curr = buf->dma_list;
if (curr == NULL )
{
emerg("nothing to dealloc\n");
}
else
{
BuffMemReserve (curr, 0);
while (curr != NULL)
{
emerg("curr :%p adrr %lu len %lu order %d\n", curr,
curr->addr, curr->len,get_order (curr->len));
free_pages ((unsigned long) curr->addr, get_order (curr->len));
curr->addr=0;
curr->len=0;
curr = curr->next;
}
}

if (buf->dma_list)
{
kfree (buf->dma_list);
buf->dma_list = NULL;
}
}



// **************************
// Routine:
// Description:
// **************************

#define MAX_ORDER_int 9
STATIC int
AllocateBuffer (struct stream_buffer *buf)
{

int scattergathercount = 0;
int i = 0;
unsigned long length = buf->vidbuf.length;
int order;
ENTRY ();
DeallocateBuffer (buf);


#warning find the define that give the maximum order
order = get_order (length);
scattergathercount = (order / MAX_ORDER_int) + 1;
buf->dma_list =
kmalloc (sizeof (struct scatter_node) * scattergathercount, GFP_KERNEL);
if (buf->dma_list == NULL)
{
return 1;
}
dbg ("allocating list success order: %d scattergather %d @%lu\n", order,
scattergathercount, buf->dma_list);

for (i = 0; i < scattergathercount; i++)
{
order = get_order (length);
dbg ("length %lu\n", length);
if (order >= MAX_ORDER_int)
{
order = MAX_ORDER_int - 1;
}
buf->dma_list[i].next = &(buf->dma_list[i + 1]);
buf->dma_list[i].addr = __get_dma_pages (GFP_KERNEL, order);
dbg("allocated bufferp %lu order %d\n",buf->dma_list[i].addr,order);
if (buf->dma_list[i].addr == 0) //we work with getpage return which is a ulong
{
DeallocateBuffer (buf);
return 1;
}

buf->dma_list[i].len = (i == scattergathercount - 1) ?
length : PAGE_SIZE * 1 << order;
length -= buf->dma_list[i].len;
dbg ("orders %d %d\n", order, get_order (buf->dma_list[i].len));


//~ int pagecount = 0;
//~ int j;
//~ void *address;
//~ for (j = (1 << get_order (buf->dma_list[i].len)) - 1; j >= 0; j--)
//~ {
//~ address = (void *) (buf->dma_list[i].addr + j * PAGE_SIZE);
//~ dbg ("%d %lu %lu \n", j, address, buf->dma_list[i].len);
//~ if (address != NULL)
//~ clear_page (address);
//~ else
//~ info ("clearing page failled O_O!\n");
//~ }

dbg ("allocating buffer length %d success\n",
buf->vidbuf.length / scattergathercount);
dbg ("allocating buffer %d success\n", i);
}
buf->dma_list[scattergathercount - 1].next = NULL;
buf->dma_list[scattergathercount - 1].last = 1;
BuffMemReserve (buf->dma_list, 1);
return 0;
}


/* Allocate buffers, and get everything ready to capture
* an image, but don't start capturing yet.
*/
STATIC int
capture_begin (struct capture_device *dev)
{
//dbg("capture_begin\n");
ENTRY ();

if (dev->ready_to_capture)
{
EXIT ();
return dev->ready_to_capture;
}

#warning it s better to reallocate the buffer on format change
if ((dev->capture_buffer_size < dev->read_buffer.vidbuf.length) ||
dev->SwitchInputs)
{
dev->SwitchInputs = 0;
if (AllocateBuffer (&dev->read_buffer))
return 0;
#warning FIX THIS
if (dev->read_buffer.dma_list == NULL)
{
dev->capture_buffer_size = 0;
err ("Can't allocate capture buffer"
" %d bytes\n", dev->read_buffer.vidbuf.length);
EXIT ();
return dev->ready_to_capture;
}
}
dbg("Ready to capture!\n");
EXIT ();
return (dev->ready_to_capture = 1);
}

#warning move calls to previous function on format change....

/* Start an image capture
*/
STATIC void
capture_grab_frame (struct capture_device *dev)
{
ENTRY ();
if (dev->ready_to_capture && dev->capture_started)
{
dbg ("capture already active\n");
return;
}

capture_begin (dev);
if (!dev->ready_to_capture)
return;
while (NULL != v4l2_q_del_head (&dev->stream_q_done));
while (NULL != v4l2_q_del_head (&dev->stream_q_capture));
if (dev->camera && dev->input == 0)
{
// Start the camera h/w. It will call us back
// on image completion.
dbg ("starting hardware snapshot\n");
v4l2_q_add_tail (&(dev->stream_q_capture),
(struct v4l2_q_node *) &(dev->read_buffer));
dev->camif->abort();
dev->camif->camif_queue_buffer (dev->read_buffer.dma_list);
dev->camif->resume();
}
dev->capture_started = 1;
dev->capture_completed = 0;
}

/* Start an image stream
*/
STATIC void
capture_stream_start (struct capture_device *dev)
{
ENTRY();
struct stream_buffer* head;

capture_begin (dev);

if (dev->ready_to_capture && dev->capture_started)
{
dbg ("capture already active\n");
return;
}
head= (struct stream_buffer*) v4l2_q_peek_head (&dev->stream_q_capture);

if (dev->camera && dev->input == 0)
{
// Start the camera h/w. It will call us back
// on image completion.
dev->camif->resume();
dev->capture_started = 1;
dev->capture_completed = 0;
}
else
{
dev->capture_started = 1;
dev->capture_completed = 0;
}
EXIT();
}

/*
* STREAMING CAPTURE
*/


STATIC int
capture_queuebuffer (struct capture_device *dev, struct v4l2_buffer *vidbuf)
{
int i = vidbuf->index;
struct stream_buffer *buf = NULL;
ENTRY();
if (!dev->stream_buffers_mapped)
{
info ("QBUF no buffers mapped\n");
return 1;
}
if (vidbuf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
{
info ("QBUF wrong type\n");
return 1;
}
if (i < 0 || i >= MAX_CAPTURE_BUFFERS || !dev->stream_buf[i].requested)
{
info ("QBUF buffer index %d is out of range\n", i);
return 1;
}

buf = &dev->stream_buf[i];

if (!(buf->vidbuf.flags & V4L2_BUF_FLAG_MAPPED))
{
info ("QBUF buffer %d is not mapped\n", i);
return 1;
}
if ((buf->vidbuf.flags & V4L2_BUF_FLAG_QUEUED))
{
info ("QBUF buffer %d is already queued\n", i);
return 1;
}

buf->vidbuf.flags &= ~V4L2_BUF_FLAG_DONE;
v4l2_q_add_tail (&dev->stream_q_capture, &buf->qnode);
buf->vidbuf.flags |= V4L2_BUF_FLAG_QUEUED;

dev->camif->camif_queue_buffer (buf->dma_list);
if (dev->streaming)
{
//do nothing if streaming is already active, else reactivate the camera
dev->camif->resume();
}
return 0;
}

STATIC int /* 1 = got a buffer; 0 = no buffers */
capture_dequeuebuffer (struct capture_device *dev, struct v4l2_buffer *buf)
{
struct stream_buffer *newbuf;
ENTRY();
if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
{
dbg ("DQBUF wrong buffer type\n");
return -EINVAL;
}
newbuf = v4l2_q_del_head (&dev->stream_q_done);
if (newbuf == NULL)
{
dbg ("DQBUF nothing on done queue\n");
return -EAGAIN;
}
newbuf->vidbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
dbg ("newbuf->vidbuf.index %d\n", newbuf->vidbuf.index);
dbg ("newbuf->vidbuf.type %d\n", newbuf->vidbuf.type);
dbg ("newbuf->vidbuf.flags %d\n", newbuf->vidbuf.flags);
*buf = newbuf->vidbuf;
return 0;
}

STATIC int
capture_streamon (struct capture_device *dev)
{
struct stream_buffer *buf;
ENTRY();

if (dev->streaming)
{
EXIT ();
return 1;
}

/* -2 is a magic number that triggers start-of-stream logic in */
/* capture_interrupt() */
dev->stream_last_frame = -2;




/* Move any leftover DONE buffers to the free pool */
while ((buf = v4l2_q_del_head (&dev->stream_q_done)))
buf->vidbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;

/* Kick off the machine in continuous capture mode */
dev->streaming = 1;
capture_stream_start (dev);
EXIT ();
//dbg("end capture_streamon\n");
return 1;
}

STATIC void
capture_streamoff (struct capture_device *dev)
{
ENTRY ();
if (!dev->streaming)
{
EXIT ();
return;
}
capture_abort (dev); //cancel dmas

EXIT ();
}


/*
* Convert raw camera image directly into framebuffer (used by
* preview mode). Returns length of data or negative for error.
*/
STATIC int
capture_display_image (struct capture_device *dev, __u8 * capture_buffer)
{
ENTRY();
if (!dev->capture_started)
{
// If we get here is probably because the PREVIEW
// mode was turned off just prior to the interrupt
// routines completing the last image. We don't
// want anymore images. Just return.
dbg ("capture not started??\n");
return 0;
}

if (dev->camera)
{
int dest_stride;

if (!dev->fbinfo_valid)
{
err ("preview set but no valid fb info\n");
return -EINVAL;
}

dest_stride = (dev->fbvar.xres_virtual * dev->fbvar.bits_per_pixel) / 8;

dev->camera->convert_image ((u8 *) capture_buffer,
(u8 *) phys_to_virt (dev->fbfix.
smem_start),
0, dest_stride, &dev->clientfmt);
}


return dev->clientfmt.sizeimage;
}



/* The hardware has issued the interrupt signal, do any post-capture
* processing that may be necessary.
* [This function is called indirectly through the immediate task queue;
* it executes at elevated IRQL, but it is interruptible. (It's a b.h.)]
*/

STATIC void
capture_interrupt (void *v)
{
struct capture_device *dev = (struct capture_device *) v;
struct stream_buffer *buf;
int len;
__s64 elapsed_time;
__u64 desiredtimeperframe = 1000000000 * dev->capture.timeperframe.numerator /
dev->capture.timeperframe.denominator;


ENTRY();

//the omap internal clock has a precision of the 0.01s.... so mask the desired timeperframe
//desiredtimeperframe is in nanosec
desiredtimeperframe /= 10000000;
desiredtimeperframe *= 10000000;

buf = v4l2_q_peek_head (&dev->stream_q_capture);
if (!dev->capture_started || dev->capture_completed || buf == NULL)
{
err("capture not started or completed or buff null\n");
return;
}
if (!dev->streaming)
dev->capture_completed = 1;

if (!dev->streaming)
{
dev->time_acquired = current_time_ms ();
if (dev->preview)
{
#warning capture display isnt functionnal due to new buffer handling
//capture_display_image (dev, dev->capture_buffer);
dev->capture_started = 0;
capture_grab_frame (dev); // Start another capture.
}
buf = v4l2_q_del_head (&dev->stream_q_capture);
v4l2_q_add_tail (&dev->stream_q_done, &buf->qnode);
wake_up_interruptible (&dev->new_video_frame);
return;
}

/* Only get here in streaming mode */
if (dev->stream_last_frame == -2)
{
/* First frame of the stream */
dev->stream_begin=dev->last_time;
dev->stream_last_frame = 0;
dbg("stream begin : %lu,%.9lu at %llu\n",
dev->stream_begin.tv_sec,
dev->stream_begin.tv_nsec,
desiredtimeperframe);
}

if (buf == NULL)
{
/*
* No available buffers. Skip this frame. This is not an
* error, it's a normal way to throttle the capture rate
*/
err("skip frame");
return;
}
dbg("it : capture buffer %d dequeued (frame present)\n", buf->vidbuf.index);

/* Compute current stream time */

elapsed_time =
(dev->last_time.tv_sec - dev->stream_begin.tv_sec) * 1000000000 +
(dev->last_time.tv_nsec - dev->stream_begin.tv_nsec);
dbg("%lu,%.9lu\n",dev->last_time.tv_sec,dev->last_time.tv_nsec);
dbg("elapsed_time : %lld\n",elapsed_time);




/* Capture rate control */
if (dev->capture.timeperframe.numerator != dev->videc.frame_fract.numerator
||dev->capture.timeperframe.denominator !=
dev->videc.frame_fract.denominator
)
{
if ( elapsed_time < desiredtimeperframe)
{
/* Not time yet, don't keep this frame */
buf = v4l2_q_del_head (&dev->stream_q_capture);
//requeue the buffer for processing...
buf->vidbuf.flags &= ~V4L2_BUF_FLAG_QUEUED;
if (capture_queuebuffer (dev, &(buf->vidbuf)))
err ("not able to requeue O_O!\n");
emerg ("droping frame \n");
return;
}
}

/* We do computations on a relative timestamp (since previous */
/* non dropped frame) */
dev->stream_begin=dev->last_time;


/* Want this frame */
dev->stream_last_frame++;
len = dev->clientfmt.sizeimage;

/* Fill in the buffer emergrmation fields */
buf->vidbuf.flags |= V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_KEYFRAME;

/* Returned timestamp should be absolute system time */
buf->vidbuf.timestamp.tv_sec = dev->last_time.tv_sec;
buf->vidbuf.timestamp.tv_usec = dev->last_time.tv_nsec*1000;
buf->vidbuf.sequence = dev->stream_last_frame;


/* Move buffer to done queue */
buf = v4l2_q_del_head (&dev->stream_q_capture);
v4l2_q_add_tail (&dev->stream_q_done, &buf->qnode);

/* A new frame is ready! */
dbg("frame elapsed time : %lld\n", elapsed_time);
wake_up_interruptible (&dev->new_video_frame);

EXIT();
}

/*
* This is the callback from the Camera Interface, when
* a new image has been captured by the camif h/w. This
* is called at interrupt time, so just mark a bh and do
* nothing more.
*/
STATIC void
camif_capture_callback (void *data)
{
struct capture_device *dev = (struct capture_device *) data;
ENTRY();
jiffies_to_timespec (jiffies, &dev->last_time);
dev->tqnode_dpc.routine = capture_interrupt;
dev->tqnode_dpc.data = dev;
queue_task (&dev->tqnode_dpc, &tq_immediate);
mark_bh (IMMEDIATE_BH);
}


/* Read captured data into a user buffer.
* Return: negative = error
* 0 = keep waiting
* positive = count of bytes read successfully
*/
STATIC long
capture_read (struct capture_device *dev,
__u8 * user_buffer, int user_buffer_size)
{
struct stream_buffer *buf =
(struct stream_buffer *) v4l2_q_del_head (&dev->stream_q_done);
int len = user_buffer_size;
struct scatter_node *curr = NULL;
unsigned long offset = 0;

ENTRY();
if (!dev->capture_completed || buf == NULL)
{
/* No interrupt has occurred yet, or DMA didn't finish. */
dbg ("No data ready.\n");
if (!dev->capture_started)
{
//capture_grab_frame (dev);
}
return 0; /* caller should keep waiting */
}

curr = buf->dma_list;

len = dev->clientfmt.sizeimage;
while (curr != NULL)
{
#warning is the cast correct?
copy_to_user (user_buffer + offset, (void *) curr->addr, curr->len);
offset += curr->len;
curr = curr->next;
}

dev->capture_started = 0;
return len;
}

/* Stop capturing and free all resources used for capture.
*/
STATIC void
capture_close (struct capture_device *dev)
{
ENTRY ();
if (dev->streaming)
capture_streamoff (dev);
capture_abort (dev);
dev->ready_to_capture = 0;
DeallocateBuffer (&(dev->read_buffer));
dev->capture_buffer_size = 0;
EXIT ();
}




/*
*
* P O W E R M A N A G E M E N T
*
*/

#if 1 /* MVL-CEE */
#include <linux/device.h>

STATIC int omap_camera_dpm_suspend (struct device *dev, u32 state, u32 level);
STATIC int omap_camera_dpm_resume (struct device *dev, u32 level);

static struct device_driver omap_camera_driver_ldm = {
name:"omap-camera",
devclass:NULL,
probe:NULL,
suspend:omap_camera_dpm_suspend,
resume:omap_camera_dpm_resume,
remove:NULL,
};

static struct device omap_camera_device_ldm = {
name:"OMAP Camera",
bus_id:"Camera",
driver:NULL,
power_state:DPM_POWER_ON,
};

STATIC void
omap_camera_ldm_register (struct capture_device *dev)
{
extern void mpu_public_driver_register (struct device_driver *driver);
extern void mpu_public_device_register (struct device *device);

omap_camera_device_ldm.platform_data = (void *) dev;
mpu_public_driver_register (&omap_camera_driver_ldm);
mpu_public_device_register (&omap_camera_device_ldm);
}

STATIC void
omap_camera_ldm_unregister (struct capture_device *dev)
{
extern void mpu_public_driver_unregister (struct device_driver *driver);
extern void mpu_public_device_unregister (struct device *device);

mpu_public_driver_unregister (&omap_camera_driver_ldm);
mpu_public_device_unregister (&omap_camera_device_ldm);
}

STATIC int
omap_camera_dpm_suspend (struct device *dev, u32 state, u32 level)
{
struct capture_device *cdev = (struct capture_device *) dev->platform_data;

switch (level)
{

case SUSPEND_NOTIFY:
cdev->suspended = 1;
break;

case SUSPEND_POWER_DOWN:
cdev->suspended = 1;
if (cdev->preview)
{
capture_abort (cdev);
cdev->camif->close ();
}
break;
}

return 0;

}

STATIC int
resume_thread (void *data)
{
struct capture_device *dev = (struct capture_device *) data;

daemonize ();
reparent_to_init ();
strcpy (current->comm, "camera_resume");
complete (&dev->resume_thread_sync);

for (;;)
{
wait_event (dev->resume_wq, dev->resume_request);

if (dev->resume_thread_exit)
break;

if (dev->preview)
{
dev->camif->open ();
if (dev->streaming)
capture_stream_start (dev);
else if (dev->preview)
capture_grab_frame (dev);
}

dev->resume_request = dev->suspended = 0;
wake_up (&dev->suspend_wq);
}

complete_and_exit (&dev->resume_thread_sync, 0);
return 0;
}

STATIC int
omap_camera_dpm_resume (struct device *dev, u32 level)
{
struct capture_device *cdev = (struct capture_device *) dev->platform_data;

switch (level)
{
case RESUME_POWER_ON:
/*
* Resume is getting called in an interrupt context on
* OMAP, and resume requires waiting on queues etc. to power
* up the camera. So we can't resume here. So we have to
* use a kernel thread for resume requests (PITA).
*/
cdev->resume_request = 1;
wake_up (&cdev->resume_wq);
break;
}
return 0;
}

#endif /* MVL-CEE */


/*
*
* M E M O R Y M A P P I N G
*
*
*/
STATIC struct stream_buffer *
mmap_stream_buffer_from_offset (struct capture_device *dev,
unsigned long offset)
{
int i;
ENTRY();
offset = offset * PAGE_SIZE;

for (i = 0; i < MAX_CAPTURE_BUFFERS; ++i)
if (offset == dev->stream_buf[i].vidbuf.m.offset)
return &dev->stream_buf[i];
return NULL;
}

STATIC int
mmap_request_buffers (struct capture_device *dev,
struct v4l2_requestbuffers *req)
{
int i;

u32 buflen;
u32 type;
ENTRY();
if (dev->stream_buffers_mapped)
return 0; /* can't make requests if buffers are mapped */
if (req->count < 1)
req->count = 1;
if (req->count > MAX_CAPTURE_BUFFERS)
req->count = MAX_CAPTURE_BUFFERS;
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;

/* The buffer length needs to be a multiple of the page size */
buflen = (dev->clientfmt.sizeimage + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
dbg("Granting %d buffers\n",req->count);

/* Now initialize the buffer structures. Don't allocate the */
/* buffers until they're mapped. */
for (i = 0; i < req->count; ++i)
{
dev->stream_buf[i].dma_list = NULL;
dev->stream_buf[i].requested = 1;
dev->stream_buf[i].vidbuf.index = i;
dev->stream_buf[i].vidbuf.type = type;
/* offset must be unique for each buffer, and a multiple */
/* of PAGE_SIZE on 2.4.x */
dev->stream_buf[i].vidbuf.m.offset = PAGE_SIZE * (i + 1);
dev->stream_buf[i].vidbuf.length = dev->clientfmt.sizeimage;
dev->stream_buf[i].vidbuf.bytesused = 0;
dev->stream_buf[i].vidbuf.flags = 0;
dev->stream_buf[i].vidbuf.timestamp.tv_sec = 0;
dev->stream_buf[i].vidbuf.timestamp.tv_usec = 0;
dev->stream_buf[i].vidbuf.sequence = 0;
memset (&dev->stream_buf[i].vidbuf.timecode, 0,
sizeof (struct v4l2_timecode));
}
for (i = req->count; i < MAX_CAPTURE_BUFFERS; ++i)
dev->stream_buf[i].requested = 0;
dev->stream_buffers_requested = req->count;

return 1;
}

STATIC void
mmap_unrequest_buffers (struct capture_device *dev)
{
int i;
ENTRY();
if (dev->stream_buffers_requested == 0 || dev->stream_buffers_mapped)
return;
for (i = 0; i < MAX_CAPTURE_BUFFERS; ++i)
dev->stream_buf[i].requested = 0;
dev->stream_buffers_requested = 0;
}


/**
* methods used by vma operations : it's not possible to remap allocated memory so we need to pass via the "nopage method"
*/

/**
* open method
*/
STATIC void
mmap_vma_open (struct vm_area_struct *vma)
{
struct device_open *o = (struct device_open *) vma->vm_file->private_data;
struct capture_device *dev = o->dev;
struct stream_buffer *buf;
ENTRY();
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (dev == NULL)
return;
buf = mmap_stream_buffer_from_offset (dev, vma->vm_pgoff);

++buf->vma_refcount;


}

STATIC void
mmap_vma_close (struct vm_area_struct *vma)
{
struct device_open *o = (struct device_open *) vma->vm_file->private_data;
struct capture_device *dev = o->dev;
struct stream_buffer *buf =
mmap_stream_buffer_from_offset (dev, vma->vm_pgoff);
int i, n = 1;

ENTRY();
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */


--buf->vma_refcount;
if (buf->vma_refcount > 0)
return;
for (i = 0; i < n; ++i)
{
if (dev->streaming)
{
//dbg("Warning- munmap() called while streaming\n");
capture_streamoff (dev);
}
v4l2_q_yank_node (&dev->stream_q_capture, &buf->qnode);
v4l2_q_yank_node (&dev->stream_q_done, &buf->qnode);

DeallocateBuffer(buf);

buf->vidbuf.flags = 0;
//dbg("Buffer %d deallocated\n",(int)vma->vm_pgoff);
++buf;
if (dev->stream_buffers_mapped > 0)
--dev->stream_buffers_mapped;

}
//MOD_DEC_USE_COUNT;
vma->vm_flags &= ~VM_RESERVED;
EXIT();
}

STATIC struct page *
mmap_vma_nopage (struct vm_area_struct *vma, unsigned long address, int write)
{
struct device_open *o = (struct device_open *) vma->vm_file->private_data;
struct capture_device *dev = o->dev;
struct stream_buffer *buf;
unsigned long offset_into_buffer;
struct scatter_node *curr;
struct page *page;
//int n = 1;

ENTRY ();
dbg ("nopage :@page 0x%lx, @ 0x%lx\n", vma->vm_pgoff, address);
if (dev == NULL)
{
dbg ("nopage : dev == NULL \n");
EXIT ();
return 0;
}

#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

buf = mmap_stream_buffer_from_offset (dev, vma->vm_pgoff);

if (buf == NULL)
{
dbg ("nopage : can't find associated buffer, @page 0x%lx, @ 0x%lx\n",
vma->vm_pgoff, address);
EXIT ();
return 0;
}

offset_into_buffer = address - vma->vm_start;
dbg ("offset_into_buffer : 0x%lx\n", offset_into_buffer);

if (offset_into_buffer >= buf->vidbuf.length)
{
err ("Attempt to read past end of mmap() buffer\n");
EXIT ();
return 0;
}
curr = buf->dma_list;
while (offset_into_buffer >= curr->len)
{
offset_into_buffer -= curr->len;
curr = curr->next;
}

page = virt_to_page ((void *) (curr->addr + offset_into_buffer));
dbg ("nopage %d: page %lu curr->addr %lu offest_into_buffer %lu "
"curr->addr + offset_into_buffer %lu\n", vma->vm_pgoff,
page_address (page), curr->addr, offset_into_buffer,
curr->addr + offset_into_buffer);
if (page == NULL)
{
dbg ("nopage : vmalloc_to_page return NULL @page 0x%lx, @ 0x%lx\n",
vma->vm_pgoff, address);
EXIT ();
return NULL;
}
get_page (page);

EXIT ();
return page;
}


static struct vm_operations_struct capture_vma_operations = {
.open = mmap_vma_open,
.close = mmap_vma_close,
.nopage = mmap_vma_nopage,
};

/*
*
* V 4 L 2 I N T E R F A C E
*
*/

STATIC int
v4l2_open (struct inode *inode, struct file *file)
{


struct capture_device *dev;
int retcode;

ENTRY();

//we know have access to file struct : store informations into private data
//this is a hack and a better use of private_data should be performed
if (file->private_data == NULL) //first open
{
//this is not clean but since struct capture_device is used everywhere....
file->private_data = kmalloc (sizeof (struct device_open), GFP_KERNEL);
if (file->private_data == NULL)
return -ENOMEM;
((struct device_open *) file->private_data)->dev = &capture;
}
dev = ((struct device_open *) file->private_data)->dev;
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (GET_USE_COUNT (THIS_MODULE) >= MAX_OPENS)
return -EBUSY;

#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

//dbg("f_flags : %x : %x \n",file->f_flags,O_TRUNC);
if (file->f_flags & O_TRUNC)
{
//dbg("Non-capturing open\n");
((struct device_open *) file->private_data)->noncapturing = 1;
}
else if (dev->capturing_opens)
{
//dbg("No more capturing opens on this device\n");
return -EBUSY;
}
else
{
/* Keep track of whether there is a capturing open */
((struct device_open *) file->private_data)->noncapturing = 0;
dev->capturing_opens = 1;
}


if (GET_USE_COUNT (THIS_MODULE) == 0)
{
if (dev->preview)
{
/* If preview mode is on then we will refrain
from any furthur device initialization since
it isn't needed and actually will disrupt the
automatic preview operation that the driver
is in the process of doing. Recall that apps
can open() the video device, turn on preview,
and then close() it; preview continues. Sometime
later they may re-open() the device and we don't
want that disrupting the on-going preview
processing. */
return 0;
}

if ((retcode = dev->camif->open ()))
{
return retcode;
}

dev->ready_to_capture = 0; /* benchmark changes parameters! */
dev->capture_completed = 0;
dev->capture_started = 0;
v4l2_q_init (&dev->stream_q_capture);
v4l2_q_init (&dev->stream_q_done);
}
//dbg("Open succeeded\n");

/* frame counter for test images only */
if (!((struct device_open *) file->private_data)->noncapturing)
dev->h = dev->m = dev->s = dev->f = 0;

MOD_INC_USE_COUNT;
return 0;
}

STATIC int
v4l2_close (struct inode *inode, struct file *file)
{

struct device_open *o = (struct device_open *) file->private_data;
struct capture_device *dev = o->dev;
ENTRY();
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (!o->noncapturing)
{
dev->capturing_opens = 0;
//dbg("Close\n");
}

if (dev->preview)
{
/* If we have been placed into preview mode then
we want to keep that going even if the app
closes the video device. We expect sometime
later the app, or some app, will open the
video device and turn off preview when it
desires. This video driver handles the
preview operation and all the app needs to
do is briefly open() us and turn on or
off the preview mode and then, if desired,
close() us. */
return 0;
}
// shut things down.
if (GET_USE_COUNT (THIS_MODULE) == 1)
{
capture_close (dev);
dev->camif->close ();
}
kfree (file->private_data);
MOD_DEC_USE_COUNT;
return 0;
}


STATIC int
v4l2_ioctl (struct inode *inode, struct file *file, unsigned int cmd,
unsigned long argp)
{
struct device_open *o = (struct device_open *) file->private_data;
struct capture_device *dev = o->dev;
void *arg = (void *) argp;
ENTRY();

dbg("ioctl called : %s (0x%x) \n", v4l2_ioctl_names[_IOC_NR (cmd)], cmd);

#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

switch (cmd)
{
case VIDIOC_QUERYCAP:
{
#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
struct v4l2_capability b = {
driver:"omap",
card:"innovator",
bus_info:"i2c+???",
version:KERNEL_VERSION (0, 0, 1),
capabilities:V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_READWRITE |
V4L2_CAP_STREAMING,
};

if (copy_to_user(arg, &b, sizeof(struct v4l2_capability)))
return -EFAULT;
//~ *((struct v4l2_capability *) arg) = b;
return 0;
}
case VIDIOC_ENUM_FMT:
{
struct v4l2_fmtdesc f;

if (copy_from_user(&f, arg, sizeof(struct v4l2_fmtdesc)))
return -EFAULT;
if (f.index < 0 || f.index >= NUM_CAPFMT)
return -EINVAL;
f = capfmt[f.index];
#warning ignore the following warning if debug isnt activated
char *pixelf = (char *) &(f.pixelformat);
dbg ("index : %d, %s, flags %d, %s, %c%c%c%c\n", f.index,
v4l2_type_names[f.type], f.flags, f.description, pixelf[0],
pixelf[1], pixelf[2], pixelf[3]);
if (copy_to_user (arg, &f, sizeof(struct v4l2_fmtdesc)))
return -EFAULT;
return 0;
}
case VIDIOC_G_FMT:
{
struct v4l2_format fmt;

if (copy_from_user(&fmt, arg, sizeof(struct v4l2_format)))
return -EFAULT;
if (fmt.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
{
dbg("G_FMT wrong buffer type %d\n",fmt.type);
return -EINVAL;
}
fmt.fmt.pix = dev->clientfmt;
if (copy_to_user(arg, &fmt, sizeof(struct v4l2_format)))
return -EFAULT;
return 0;
}
case VIDIOC_TRY_FMT:
{
struct v4l2_format fmt;

if (copy_from_user(&fmt, arg, sizeof(struct v4l2_format)))
return -EFAULT;
if (fmt.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
validate_format (&fmt.fmt.pix);
#warning ignore the following warning if debug isnt activated
char *pixelformat = (char *) &fmt.fmt.pix.pixelformat;
dbg ("WxHxD : %dx%dx%c%c%c%c , %s, bpl : %d, size :%d, color : %d\n",
fmt.fmt.pix.width, fmt.fmt.pix.height,
pixelformat[0], pixelformat[1], pixelformat[2], pixelformat[3],
v4l2_field_names[fmt.fmt.pix.field], fmt.fmt.pix.bytesperline,
fmt.fmt.pix.sizeimage, fmt.fmt.pix.colorspace);
if (copy_to_user(arg, &fmt, sizeof(struct v4l2_format)))
return -EFAULT;
return 0;
}
case VIDIOC_S_FMT:
{
struct v4l2_format fmt;

if (copy_from_user(&fmt, arg, sizeof(struct v4l2_format)))
return -EFAULT;
if (fmt.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;

if (o->noncapturing)
{
dbg ("S_FMT illegal in non-capturing open\n");
return -EPERM;
}
if (dev->stream_buffers_mapped)
{
dbg ("S_FMT illegal when buffer mapped\n");
return -EPERM;
}

dev->clientfmt = fmt.fmt.pix;

capture_abort (dev);
if (capture_new_format (dev))
return -EINVAL;
if (dev->streaming)
capture_stream_start (dev);
else if (dev->preview)
capture_grab_frame (dev);
fmt.fmt.pix = dev->clientfmt;

if (copy_to_user(arg, &fmt, sizeof(struct v4l2_format)))
return -EFAULT;
return 0;
}
case VIDIOC_REQBUFS:
{
struct v4l2_requestbuffers req;

if(copy_from_user(&req, arg, sizeof(struct v4l2_requestbuffers)))
return -EFAULT;
if (o->noncapturing)
{
dbg("REQBUFS illegal in non-capturing open\n");
return -EPERM;
}
if (dev->stream_buffers_mapped)
{
dbg("Can't request buffers if buffers are "
"already mapped\n");
return -EPERM;
}
mmap_unrequest_buffers (dev);
capture_begin (dev);
if (!mmap_request_buffers (dev, &req))
return -EINVAL;
if (copy_to_user(arg, &req, sizeof(struct v4l2_requestbuffers)))
return -EFAULT;
return 0;
}
case VIDIOC_QUERYBUF:
{
struct v4l2_buffer buf;
int i;

if (copy_from_user(&buf, arg, sizeof(struct v4l2_buffer)))
return -EFAULT;
if (o->noncapturing)
{
dbg("QUERYBUF illegal in non-capturing open\n");
return -EPERM;
}
i = buf.index;
if (i < 0 || i >= MAX_CAPTURE_BUFFERS ||
!dev->stream_buf[i].requested ||
(buf.field != dev->stream_buf[i].vidbuf.field))
{
dbg("QUERYBUF bad parameter\n");
return -EINVAL;
}
buf = dev->stream_buf[i].vidbuf;
if (copy_to_user(arg, &buf, sizeof(struct v4l2_buffer)))
return -EFAULT;
return 0;
}
case VIDIOC_QBUF:
{
struct v4l2_buffer buf;

if (copy_from_user(&buf, arg, sizeof(struct v4l2_buffer)))
return -EFAULT;
if (o->noncapturing)
{
dbg ("QBUF illegal in non-capturing open\n");
return -EPERM;
}
if (!dev->stream_buffers_mapped)
{
dbg ("QBUF no buffers are mapped\n");
return -EINVAL;
}
if (capture_queuebuffer (dev, &buf))
return -EINVAL;
if (copy_to_user(arg, &buf, sizeof(struct v4l2_buffer)))
return -EFAULT;
return 0;
}
case VIDIOC_DQBUF:
{
struct v4l2_buffer buff;
int err=0;

if (copy_from_user(&buff, arg, sizeof(struct v4l2_buffer)))
return -EFAULT;
if (o->noncapturing)
{
dbg ("DQBUF illegal in non-capturing open\n");
return -EPERM;
}

err=capture_dequeuebuffer (dev, &buff);
if ( err )
{
dbg ("dequeue error %d\n", err);
return err;
}
if (copy_to_user(arg, &buff, sizeof(struct v4l2_buffer)))
return -EFAULT;
return 0;
}
case VIDIOC_STREAMON:
{
dbg ("STREAMON\n");

if (o->noncapturing)
{
dbg("STREAMON illegal in non-capturing open\n");
return -EPERM;
}
if (!capture_streamon (dev))
return -EINVAL;
return 0;
}
case VIDIOC_STREAMOFF:
{
dbg ("STREAMOFF\n");

if (o->noncapturing)
{
dbg("STREAMOFF illegal in non-capturing open\n");
return -EPERM;
}
capture_streamoff (dev);
return 0;
}
case VIDIOC_OVERLAY:
{
int on;

if (copy_from_user(&on, arg, sizeof(int)))
return -EFAULT;
dbg ("VIDIOC_OVERLAY %d\n", on); // *revisit-skranz* temp.
if (!dev->preview && (on == 1))
{
// It is "off" now but we want it "on", so...

/*
* schedule the framebuffer dbg updater to run
* (it has to run under keventd).
*/
dev->fbinfo_valid = 0;
fbinfo_tsk_q_entry.routine = update_fbinfo_task;
fbinfo_tsk_q_entry.data = (void *) dev;
schedule_task (&fbinfo_tsk_q_entry);
interruptible_sleep_on (&dev->fbinfo_wait);
if (signal_pending (current))
return -ERESTARTSYS;

dev->preview = 1;
capture_grab_frame (dev);
if (!dev->ready_to_capture)
{
dbg ("ioctl(..,VIDIOC_PREVIEW,...); Can't grab frames!\n");
return -EINVAL;
}
}
else
{
if (dev->preview && (on == 0))
{
// It is "on" now but we want it "off", so...
capture_abort (dev);
dev->ready_to_capture = 0;
dev->preview = 0;
}
}
return 0;
}
case VIDIOC_G_INPUT:
{
if (copy_to_user(arg, &dev->input, sizeof (dev->input)))
return -EFAULT;
return 0;
}
case VIDIOC_S_INPUT:
{
int input;

if (copy_from_user(&input, arg, sizeof(int)))
return -EFAULT;
if (input < 0 || input >= dev->videc.num_inputs)
{
//dbg("Input out of range %d\n", input);
return -EINVAL;
}
if (input != dev->input)
{
dev->SwitchInputs = 1;
dev->input = input;
set_video_input (dev, input);
}
return 0;
}
case VIDIOC_G_PARM:
{
struct v4l2_streamparm sp;
if (copy_from_user(&sp, arg, sizeof(struct v4l2_streamparm)))
return -EFAULT;
if (sp.type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (copy_to_user(&sp.parm.capture,
&(dev->capture),
sizeof(struct v4l2_streamparm))
)
return -EFAULT;
return 0;
}
case VIDIOC_S_PARM:
{
dbg("VIDIOC_S_PARM \n");
struct v4l2_streamparm sp;
struct v4l2_captureparm *vp;
int errno;

if (copy_from_user(&sp, arg, sizeof(struct v4l2_streamparm)))
return -EFAULT;
vp = &(sp.parm.capture);

dbg ( "setting timeperframe : %d/%d\n",
vp->timeperframe.numerator,
vp->timeperframe.denominator);

if (vp->capturemode & ~dev->capture.capability)
{
dbg ("PARM unsupported capture capability %08X\n",
vp->capturemode);
return -EINVAL;
}

if ((dev->capture.capability & V4L2_CAP_TIMEPERFRAME)
&& (100000000* vp->timeperframe.numerator /
vp->timeperframe.denominator) <
10000)
{
dbg ("PARM time per frame out of range %d\n",
vp->timeperframe.numerator / vp->timeperframe.denominator);
return -EINVAL;
}
if (vp->capturemode != dev->capture.capturemode &&
!o->noncapturing && dev->streaming)
{
dbg ("S_PARM state error\n");
return -EINVAL;
}

if (o->noncapturing)
return 0;
if (dev->capture.capability & V4L2_CAP_TIMEPERFRAME)
{
dev->capture.capturemode = vp->capturemode;

if (vp->capturemode & V4L2_MODE_HIGHQUALITY)
{
vp->timeperframe.numerator=1;
vp->timeperframe.denominator=15;
}

capture_abort (dev);
dbg ( "setting timeperframe : %d/%d\n",
vp->timeperframe.numerator,
vp->timeperframe.denominator);
//make some sanity checks
if ( dev->capture.timeperframe.numerator == 0 )
dev->capture.timeperframe.numerator=1;
if ( dev->capture.timeperframe.denominator == 0 )
dev->capture.timeperframe.denominator=30;
if ( dev->videc.frame_fract.numerator == 0 )
dev->videc.frame_fract.numerator=1;
if ( dev->videc.frame_fract.denominator == 0 )
dev->videc.frame_fract.denominator=30;
if ( vp->timeperframe.numerator == 0 )
vp->timeperframe.numerator=1;
if ( vp->timeperframe.denominator == 0 )
vp->timeperframe.denominator=30;

errno=dev->videc.set_frame_period (dev, vp->timeperframe);
if (errno != 0 && errno != -ENOEXEC )
{
return -EINVAL;
}

if (vp->timeperframe.denominator/vp->timeperframe.numerator
< dev->videc.frame_fract.denominator/dev->videc.frame_fract.numerator)
{
dev->capture.timeperframe=vp->timeperframe;
dbg ("setting fp at %d/%d\n",dev->capture.timeperframe.numerator,
dev->capture.timeperframe.denominator);
}
else
{
dev->capture.timeperframe=dev->videc.frame_fract;
}

if (dev->streaming)
capture_stream_start (dev);
else if (dev->preview)
capture_grab_frame (dev);

}
return 0;
}
case VIDIOC_G_STD:
{
v4l2_std_id std;
if (dev->videc.standard == V4L2_STD_PAL)
std = V4L2_STD_PAL;
else
std = V4L2_STD_NTSC;
if (copy_to_user(arg,&std,sizeof(v4l2_std_id)))
return -EFAULT;
return 0;
}
case VIDIOC_S_STD:
{
v4l2_std_id id;

if (copy_from_user(&id,arg, sizeof(v4l2_std_id)))
return -EFAULT;

if ((o->noncapturing && dev->capturing_opens))
return -EPERM;

if (!(id & dev->videc.standards))
{
dbg ("Bad standard: %u\n", (unsigned) id);
return -EINVAL;
}
if (dev->stream_buffers_mapped)
{
dbg ("S_FMT illegal when buffer mapped\n");
return -EPERM;
}
capture_abort (dev);
dev->videc.set_standard (dev, id);
if (capture_new_format (dev))
return -EINVAL;
if (dev->streaming)
capture_stream_start (dev);
else if (dev->preview)
capture_grab_frame (dev);
return 0;
}
case VIDIOC_ENUMSTD:
{
const struct v4l2_standard ntsc = NTSC; //see v4l2.c
const struct v4l2_standard pal = PAL; //see v4l2.h
struct v4l2_standard std;

if (copy_from_user(&std,arg, sizeof(struct v4l2_standard)))
return -EFAULT;

if (std.index == 0)
std = ntsc;
else if (std.index == 1)
std = pal;
else
return -EINVAL;

if (copy_to_user(arg,&std, sizeof(struct v4l2_standard)))
return -EFAULT;
return 0;
}
case VIDIOC_ENUMINPUT:
{
struct v4l2_input vi;

if (copy_from_user (&vi, arg, sizeof(struct v4l2_input)))
return -EFAULT;
if (vi.index < 0 || vi.index >= dev->videc.num_inputs)
return -EINVAL;
vi = dev->source[vi.index].input;
if (copy_to_user(arg, &vi, sizeof(struct v4l2_input)))
return -EFAULT;
return 0;
}
case VIDIOC_QUERYCTRL:
{
struct v4l2_queryctrl qc;
int err;

if (copy_from_user(&qc, arg, sizeof(struct v4l2_queryctrl) ))
return -EFAULT;
if (dev->camera)
{
err=dev->camera->query_control (&qc);
if (err) return err;
if (copy_to_user(arg, &qc, sizeof(struct v4l2_queryctrl) ))
return -EFAULT;
return 0;
}
return -ENODEV;
}
case VIDIOC_QUERYMENU:
{
struct v4l2_querymenu qm;
int err;

if (copy_from_user(&qm , arg, sizeof(struct v4l2_querymenu) ))
return -EFAULT;
if (dev->camera)
{
err=dev->camera->query_menu (&qm );
if (err) return err;
if (copy_to_user(arg, &qm , sizeof(struct v4l2_querymenu) ))
return -EFAULT;
return 0;
}
return -ENODEV;
}
case VIDIOC_G_CTRL:
{
struct v4l2_control vc;
int err;

if (copy_from_user(&vc , arg, sizeof(struct v4l2_control) ))
return -EFAULT;
if (dev->camera)
{
err=dev->camera->get_control (&vc );
if (err) return err;
if (copy_to_user(arg, &vc , sizeof(struct v4l2_control) ))
return -EFAULT;
return 0;
}
return -ENODEV;
}
case VIDIOC_S_CTRL:
{
struct v4l2_control vc;
int err;

if (copy_from_user(&vc , arg, sizeof(struct v4l2_control) ))
return -EFAULT;
if (dev->camera)
{
err=dev->camera->set_control (&vc );
if (err) return err;
if (copy_to_user(arg, &vc , sizeof(struct v4l2_control) ))\
return -EFAULT;
return 0;
}
return -ENODEV;

}
case VIDIOC_G_TUNER:
return -EINVAL;
case VIDIOC_S_TUNER:
return -EINVAL;

default:
return -EINVAL;
}
return 0;
}

STATIC int
v4l2_mmap (struct file *filp, struct vm_area_struct *vma)
{
struct device_open *o = (struct device_open *) filp->private_data;

struct capture_device *dev = o->dev;
struct stream_buffer *buf;
ENTRY();

#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (o->noncapturing)
{
dbg("mmap() called on non-capturing open\n");
return -ENODEV;
}
buf = mmap_stream_buffer_from_offset (dev, vma->vm_pgoff); //get current buffer
if (buf == NULL)
{
dbg ("mmap() Invalid offset parameter\n");
return -EINVAL; /* no such buffer */
}

if (!buf->requested)
{
dbg ("mmap() Buffer is not available for" " mapping\n");
return -EINVAL; /* not requested */
}
if (buf->vidbuf.flags & V4L2_BUF_FLAG_MAPPED)
{
dbg("mmap() Buffer is already mapped\n");
return -EINVAL; /* already mapped */
}

if (AllocateBuffer (buf))
{
return -ENOMEM;
}

if (buf->dma_list == NULL)
{
err ("Could not allocate mmap() buffer\n");
return -ENODEV;
}

buf->vidbuf.flags |= V4L2_BUF_FLAG_MAPPED;
++dev->stream_buffers_mapped;

vma->vm_ops = &capture_vma_operations;
vma->vm_flags |= VM_LOCKED;
vma->vm_file = filp;
if (vma->vm_ops->open)
vma->vm_ops->open (vma);

EXIT();
return 0;
}

STATIC unsigned int
v4l2_poll (struct file *file, struct poll_table_struct *table)
{
struct device_open *o = (struct device_open *) file->private_data;
struct capture_device *dev = o->dev;
ENTRY();
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (o->noncapturing)
{
dbg("poll() illegal in non-capturing open\n");
return POLLERR;
}

if (dev->streaming)
{
void *node;
node = v4l2_q_peek_head (&dev->stream_q_done);
if (node != NULL)
return (POLLIN | POLLRDNORM); /* data is ready now */
node = v4l2_q_peek_head (&dev->stream_q_capture);
if (node == NULL)
{
return POLLERR; /* no frames queued */
}
poll_wait (file, &dev->new_video_frame, table);
return 0;
}

/* Capture is through read() call */
if (dev->capture_completed) /* data is ready now */
return (POLLIN | POLLRDNORM);

capture_grab_frame (dev); /* does nothing if capture is in progress */
if (!dev->ready_to_capture)
{
dbg ("Can't grab frames!\n");
return POLLERR;
}

dbg("avant poll\n");
poll_wait (file, &dev->new_video_frame, table);
dbg("apres poll \n");
return 0;
}

STATIC ssize_t
v4l2_read (struct file * file, char *buf, size_t count, loff_t * f_pos)
{
dbg ("read called\n");
struct device_open *o = (struct device_open *) file->private_data;
struct capture_device *dev = o->dev;
long len = 0;
long my_timeout;

#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

if (o->noncapturing)
{
dbg ("read() illegal in non-capturing open\n");
return -EPERM;
}
if (dev->streaming)
{
dbg ("Can't read() when streaming is on\n");
return -EPERM;
}

capture_grab_frame (dev); /* does nothing if capture is in progress */

if (!dev->ready_to_capture)
{
dbg ("Can't grab frames!\n");
return 0;
}

my_timeout = HZ / 5;
dbg("before while len : %ld\n",len);
while (len == 0)
{
if (!dev->capture_completed)
{
dbg("read:sleep for data\n");
if (file->f_flags & O_NONBLOCK)
{
dbg("EAGAIN\n");
return -EAGAIN;
}
my_timeout =
interruptible_sleep_on_timeout (&dev->
new_video_frame, my_timeout);

}

if (my_timeout == 0)
{
dbg("Timeout on read\n");
break;
}
len = capture_read (dev, buf, count);
}

return len;
}


/*
* Remaining initialization of video decoder etc. This is only
* done when the device is successfully identified and registered.
*/
STATIC int
v4l2_init_done (struct video_device *v)
{
struct capture_device *dev = (struct capture_device *) v;
int i;

info ("v4l2_init_done\n");
#if 1 /* MVL-CEE */
wait_event (dev->suspend_wq, dev->suspended == 0);
#endif /* MVL-CEE */

/* Initialize video input array */
for (i = 0; i < VSOURCE_COUNT; ++i)
{
dev->source[i].input.index = i;
dev->source[i].input.type = V4L2_INPUT_TYPE_CAMERA;
}
strcpy (dev->source[VSOURCE_CAMERA].input.name, "Camera");
strcpy (dev->source[VSOURCE_TEST].input.name, "Color Bar Test");
/*strcpy(dev->source[VSOURCE_TUNER].input.name, "Tuner"); */
/*dev->source[VSOURCE_TUNER].input.type = V4L2_INPUT_TYPE_TUNER; */

/* Initialize the video decoder hardware */
dev->videc.initialize (dev);

/* BUG: get defaults from user somehow... */
set_video_input (dev, VSOURCE_CAMERA);

capture_new_format (dev);
return 0;
}


/* =====================================================================
* The functions below this point are only called during loading
* and unloading of the driver.
*/


/*
* D E V I C E I N I A L I Z A T I O N R O U T I N E S
*
* These routines locate and enable the hardware, and initialize
* the device structure.
*/


//in new v4l2 api if fops field is provided then on open call, interface will replace file.fops
//do NOT set owner field : this will cause usagecount of this module to be externaly handled, thing
//that we don't want
static struct file_operations video_fops = {
llseek : NULL,
read : v4l2_read,
write : NULL, // v4l2_write,
ioctl : v4l2_ioctl,
mmap : v4l2_mmap,
open : v4l2_open,
release : v4l2_close,
poll : v4l2_poll,
};

/* initialisation structures, this is easier to read rather than initialize them into functions*/
static struct capture_device capture = {
v:
{
name : "OMAP1640 V4L2 Capture Driver",
type : VFL_TYPE_GRABBER,
fops : &video_fops,
},
preview : 0,
stream_buffers_mapped : 0,
shortname : "capture",
camif : &camif_innovator,
capture :
{
capability : V4L2_CAP_TIMEPERFRAME | V4L2_MODE_HIGHQUALITY
|V4L2_CAP_STREAMING,
capturemode : 0,
extendedmode : 0,
timeperframe :
{
numerator :1,
denominator :30,
}
},
clientfmt:
{
pixelformat : V4L2_PIX_FMT_RGB565,
bytesperline : 0,
sizeimage : 0,
},
depth : 16,
videc:
{
standards : V4L2_STD_NTSC | V4L2_STD_PAL,
ntsc_hskip : 30,
ntsc_vskip : 12,
ntsc_width : 640,
ntsc_height : 480,
ntsc_field_order : 0,
pal_hskip : 62,
pal_vskip : 14,
pal_width : 640,
pal_height : 480,
pal_field_order : 0,
preferred_field : 0,
num_inputs : 2,
decoder_is_stable : 1,
frame_fract :
{
numerator :1,
denominator :30,
},
//decoder methods
initialize : decoder_init,
set_input : decoder_set_input,
set_standard : decoder_set_standard,
set_vcrmode : decoder_set_vcrmode,
is_stable : decoder_is_stable,
set_frame_period : decoder_set_frame_period,
},
source:
{
{
input:
{
name : "Camera",
index : 1,
type : V4L2_INPUT_TYPE_CAMERA,
}
},
{
input:
{
name : "Color Bar Test",
index : 2,
type : V4L2_INPUT_TYPE_CAMERA,
}
},
},
};



STATIC int
config_a_device (struct capture_device *dev)
{
sprintf (dev->shortname, "capture");

dbg ("avant find_decoder\n");
if (!find_decoder (dev))
{
err ("Bad or unrecognized video decoder\n");
return -ENODEV;
}
dbg ("avant power management\n");
#warning fix this we got a segfault of the power management thread
#if 1 /* MVL-CEE */
/*dev->resume_thread_exit = 0;
dev->resume_thread_pid =
kernel_thread(&resume_thread, dev,
CLONE_FS | CLONE_FILES | CLONE_SIGHAND);
if (dev->resume_thread_pid < 0) {
err("could not start resume thread\n");
return -ENODEV;
}
info("avant omap_camera_ldm_register\n");
omap_camera_ldm_register(dev); */
#endif

return 0;
}

STATIC void
unconfig_a_device (struct capture_device *dev)
{
capture_close (dev);

dev->camif->cleanup ();


#if 1 /* MVL-CEE */
if (dev->resume_thread_pid >= 0)
{
dev->resume_thread_exit = 1;
wake_up (&dev->resume_wq);
wait_for_completion (&dev->resume_thread_sync);
}

omap_camera_ldm_unregister (dev);
#endif

memset (dev, 0, sizeof (capture));
}



STATIC void
update_fbinfo_task (void *dev)
{
struct capture_device *device = (struct capture_device *) dev;

if (device)
{
get_framebuffer_info (device);
wake_up (&device->fbinfo_wait);
}
}




/*
* M O D U L E I N I T A N D C L E A N U P
*/
STATIC void __exit
omap_v4l2_cleanup (void)
{
info ("rmmod");
capture_close (&capture);
capture.camif->cleanup ();
if (capture.is_registered)
{
video_unregister_device ((struct video_device *) &capture);
dbg("Removed device %s\n", dev->shortname);
}
memset (&capture, 0, sizeof (capture));
}


STATIC int __init
omap_v4l2_init (void)
{
int retcode;
info ("camera driver revision 0.0.1.6 (alpha) \n");
// initialize the camera interface
if ((retcode = capture.camif->init (camif_capture_callback, &capture)))
{
err ("Camera Interface init failed\n");
return retcode;
}

capture.camera = capture.camif->camera_detect ();

if (!capture.camera)
{
info ("No camera detected.\n");
return -ENODEV;
}

if (config_a_device (&capture))
{
unconfig_a_device (&capture);
return -ENODEV;
}
if (video_register_device
((struct video_device *) &capture, VFL_TYPE_GRABBER, -1) != 0)
{
err ("Couldn't register the driver.\n");
omap_v4l2_cleanup ();
return -ENODEV;
}

/////////////////////////////////////////////////////////////////////////////////////////////


//info("Initialize the video decoder hardware\n");
capture.camif->open ();
// BUG: get defaults from user somehow...
//info("capture.videc.set_standard\n");
capture.videc.set_standard (&capture, V4L2_STD_NTSC);
//info("capture.videc.set_vcrmode\n");
capture.videc.set_vcrmode (&capture, 0);
//info("set_video_input\n");
set_video_input (&capture, VSOURCE_CAMERA);

init_waitqueue_head (&capture.new_video_frame);
init_waitqueue_head (&capture.fbinfo_wait);
init_waitqueue_head (&capture.suspend_wq);
init_waitqueue_head (&capture.resume_wq);
init_completion (&capture.resume_thread_sync);


capture_new_format (&capture);

capture.camif->close ();

capture.is_registered = 1;

return 0;
}



#ifndef MODULE
int
init_v4l2_omap (struct video_init *ignore)
{
return omap_v4l2_init ();
}
#else
module_init (omap_v4l2_init);
module_exit (omap_v4l2_cleanup);
#endif


MODULE_AUTHOR ("RidgeRun, modified sagem (jl.malet@xxxxxxxxxxx)");
MODULE_DESCRIPTION ("omap video driver");
MODULE_LICENSE ("GPL");