1. kmalloc
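A minimal sketch of kmalloc()/kfree() usage; the buffer size, function names, and the GFP_KERNEL choice here are illustrative assumptions, not taken from the drivers below:

//kmalloc_example.c (illustrative sketch only)
#include <linux/slab.h>      /* kmalloc(), kfree() */
#include <linux/string.h>    /* memset() */
#include <linux/errno.h>

static char *example_buf;

static int example_alloc(void)
{
        /* GFP_KERNEL may sleep, so do not call this in atomic context */
        example_buf = kmalloc(4000, GFP_KERNEL);
        if (!example_buf)
                return -ENOMEM;
        memset(example_buf, 0, 4000);
        return 0;
}

static void example_free(void)
{
        kfree(example_buf);   /* kfree(NULL) is safe */
        example_buf = NULL;
}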
2. Lookaside caches
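The scullc driver below is built on the slab (lookaside-cache) API. As a hedged summary of just that call sequence, with illustrative names and an assumed fixed object size of 4000 bytes (scullc uses scull_quantum):

//slab_cache_example.c (API sketch only)
#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *my_cache;

static int cache_setup(void)
{
        /* one cache; every object it hands out has the same size */
        my_cache = kmem_cache_create("my_cache", 4000, 0,
                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!my_cache)
                return -ENOMEM;
        return 0;
}

static void *cache_get(void)
{
        return kmem_cache_alloc(my_cache, GFP_KERNEL);
}

static void cache_put(void *obj)
{
        kmem_cache_free(my_cache, obj);
}

static void cache_teardown(void)
{
        kmem_cache_destroy(my_cache);
}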
Example: cache
Driver
//scullc.c //#include <linux/config.h> #include <linux/sched.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> /* printk() */ #include <linux/slab.h> /* kmalloc() */ #include <linux/fs.h> /* everything... */ #include <linux/errno.h> /* error codes */ #include <linux/types.h> /* size_t */ #include <linux/proc_fs.h> #include <linux/fcntl.h> /* O_ACCMODE */ #include <linux/seq_file.h> #include <linux/cdev.h> #include <linux/slab.h> #include <asm/system.h> /* cli(), *_flags */ #include <asm/uaccess.h> /* copy_*_user */ #include "scullc.h" /* * Our parameters which can be set at load time. */ int scull_major = SCULL_MAJOR; int scull_minor = 0; int scull_nr_devs = SCULL_NR_DEVS; /* number of bare scull devices */ int scull_quantum = SCULL_QUANTUM; int scull_qset = SCULL_QSET; module_param(scull_major, int, S_IRUGO); module_param(scull_minor, int, S_IRUGO); module_param(scull_nr_devs, int, S_IRUGO); module_param(scull_quantum, int, S_IRUGO); module_param(scull_qset, int, S_IRUGO); MODULE_AUTHOR("Alessandro Rubini, Jonathan Corbet"); MODULE_LICENSE("Dual BSD/GPL"); struct kmem_cache *scull_cache; struct scull_dev *scull_devices; /* allocated in scull_init_module */ //kmem_cache_t *scull_cache; //kmem_cache *scull_cache; /* * The proc filesystem: function to read and entry */ int scull_read_procmem(char *buf, char **start, off_t offset, int count, int *eof, void *data) { int i, j, len = 0; int limit = count - 80; /* Don't print more than this */ for (i = 0; i < scull_nr_devs && len <= limit; i++) { struct scull_dev *d = &scull_devices[i]; struct scull_qset *qs = d->data; if (down_interruptible(&d->sem)) return -ERESTARTSYS; len += sprintf(buf+len,"\nDevice %i: qset %i, q %i, sz %li\n", i, d->qset, d->quantum, d->size); for (; qs && len <= limit; qs = qs->next) { /* scan the list */ len += sprintf(buf + len, " item at %p, qset at %p\n", qs, qs->data); if (qs->data && !qs->next) /* dump only the last item */ for (j = 0; j < d->qset; j++) { if (qs->data[j]) len += sprintf(buf + len, " % 4i: %8p\n", j, qs->data[j]); } } up(&scull_devices[i].sem); } *eof = 1; return len; } int scull_trim(struct scull_dev *dev) { struct scull_qset *next, *dptr; int qset = dev->qset; /* "dev" is not-null */ int i; for (dptr = dev->data; dptr; dptr = next) { /* all the list items */ if (dptr->data) { for (i = 0; i < qset; i++) kfree(dptr->data[i]); kfree(dptr->data); dptr->data = NULL; } next = dptr->next; kfree(dptr); } dev->size = 0; dev->quantum = scull_quantum; dev->qset = scull_qset; dev->data = NULL; return 0; } /* * Open and close */ int scull_open(struct inode *inode, struct file *filp) { struct scull_dev *dev; /* device information */ dev = container_of(inode->i_cdev, struct scull_dev, cdev); filp->private_data = dev; /* for other methods */ /* now trim to 0 the length of the device if open was write-only */ if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) { if (down_interruptible(&dev->sem)) return -ERESTARTSYS; scull_trim(dev); /* ignore errors */ up(&dev->sem); } return 0; /* success */ } int scull_release(struct inode *inode, struct file *filp) { return 0; } /* * Data management: read and write */ /* * Follow the list */ struct scull_qset *scull_follow(struct scull_dev *dev, int n) { struct scull_qset *qs = dev->data; /* Allocate first qset explicitly if need be */ if (! 
qs) { qs = dev->data = kmalloc(sizeof(struct scull_qset), GFP_KERNEL); if (qs == NULL) return NULL; /* Never mind */ memset(qs, 0, sizeof(struct scull_qset)); } /* Then follow the list */ while (n--) { if (!qs->next) { qs->next = kmalloc(sizeof(struct scull_qset), GFP_KERNEL); if (qs->next == NULL) return NULL; /* Never mind */ memset(qs->next, 0, sizeof(struct scull_qset)); } qs = qs->next; continue; } return qs; } /* * Data management: read and write */ ssize_t scull_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct scull_dev *dev = filp->private_data; struct scull_qset *dptr; /* the first listitem */ int quantum = dev->quantum, qset = dev->qset; int itemsize = quantum * qset; /* how many bytes in the listitem */ int item, s_pos, q_pos, rest; ssize_t retval = 0; if (down_interruptible(&dev->sem)) return -ERESTARTSYS; if (*f_pos >= dev->size) goto out; if (*f_pos + count > dev->size) count = dev->size - *f_pos; /* find listitem, qset index, and offset in the quantum */ item = (long)*f_pos / itemsize; rest = (long)*f_pos % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position (defined elsewhere) */ dptr = scull_follow(dev, item); if (dptr == NULL || !dptr->data || ! dptr->data[s_pos]) goto out; /* don't fill holes */ /* read only up to the end of this quantum */ if (count > quantum - q_pos) count = quantum - q_pos; if (copy_to_user(buf, dptr->data[s_pos] + q_pos, count)) { retval = -EFAULT; goto out; } *f_pos += count; retval = count; out: up(&dev->sem); return retval; } ssize_t scull_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct scull_dev *dev = filp->private_data; struct scull_qset *dptr; int quantum = dev->quantum, qset = dev->qset; int itemsize = quantum * qset; int item, s_pos, q_pos, rest; ssize_t retval = -ENOMEM; /* value used in "goto out" statements */ if (down_interruptible(&dev->sem)) return -ERESTARTSYS; /* find listitem, qset index and offset in the quantum */ item = (long)*f_pos / itemsize; rest = (long)*f_pos % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position */ dptr = scull_follow(dev, item); if (dptr == NULL) goto out; if (!dptr->data) { dptr->data = kmalloc(qset * sizeof(char *), GFP_KERNEL); if (!dptr->data) goto out; memset(dptr->data, 0, qset * sizeof(char *)); } /* if (!dptr->data[s_pos]) { dptr->data[s_pos] = kmalloc(quantum, GFP_KERNEL); if (!dptr->data[s_pos]) goto out; } */ if (!dptr->data[s_pos]){ dptr->data[s_pos] = kmem_cache_alloc(scull_cache,GFP_KERNEL); if (!dptr->data[s_pos]) goto out; memset(dptr->data[s_pos],0,scull_quantum); } /* write only up to the end of this quantum */ if (count > quantum - q_pos) count = quantum - q_pos; if (copy_from_user(dptr->data[s_pos]+q_pos, buf, count)) { retval = -EFAULT; goto out; } *f_pos += count; retval = count; /* update the size */ if (dev->size < *f_pos) dev->size = *f_pos; out: up(&dev->sem); return retval; } /* * The "extended" operations -- only seek */ loff_t scull_llseek(struct file *filp, loff_t off, int whence) { struct scull_dev *dev = filp->private_data; loff_t newpos; switch(whence) { case 0: /* SEEK_SET */ newpos = off; break; case 1: /* SEEK_CUR */ newpos = filp->f_pos + off; break; case 2: /* SEEK_END */ newpos = dev->size + off; break; default: /* can't happen */ return -EINVAL; } if (newpos < 0) return -EINVAL; filp->f_pos = newpos; return newpos; } /* * The ioctl() implementation */ int scull_ioctl(struct inode *inode, struct file 
*filp, unsigned int cmd, unsigned long arg) { int err = 0, tmp; int retval = 0; /* * extract the type and number bitfields, and don't decode * wrong cmds: return ENOTTY (inappropriate ioctl) before access_ok() */ if (_IOC_TYPE(cmd) != SCULL_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SCULL_IOC_MAXNR) return -ENOTTY; /* * the direction is a bitmask, and VERIFY_WRITE catches R/W * transfers. `Type' is user-oriented, while * access_ok is kernel-oriented, so the concept of "read" and * "write" is reversed */ if (_IOC_DIR(cmd) & _IOC_READ) /*through access_ok() check the addr is legal userspace address */ err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); if (err) return -EFAULT; switch(cmd) { case SCULL_IOCRESET: scull_quantum = SCULL_QUANTUM; scull_qset = SCULL_QSET; break; case SCULL_IOCSQUANTUM: /* Set: arg points to the value */ if (! capable (CAP_SYS_ADMIN)) return -EPERM; retval = __get_user(scull_quantum, (int __user *)arg); break; case SCULL_IOCTQUANTUM: /* Tell: arg is the value */ if (! capable (CAP_SYS_ADMIN)) return -EPERM; scull_quantum = arg; break; case SCULL_IOCGQUANTUM: /* Get: arg is pointer to result */ retval = __put_user(scull_quantum, (int __user *)arg); break; case SCULL_IOCQQUANTUM: /* Query: return it (it's positive) */ return scull_quantum; case SCULL_IOCXQUANTUM: /* eXchange: use arg as pointer */ if (! capable (CAP_SYS_ADMIN)) return -EPERM; tmp = scull_quantum; retval = __get_user(scull_quantum, (int __user *)arg); if (retval == 0) retval = __put_user(tmp, (int __user *)arg); break; case SCULL_IOCHQUANTUM: /* sHift: like Tell + Query */ if (! capable (CAP_SYS_ADMIN)) return -EPERM; tmp = scull_quantum; scull_quantum = arg; return tmp; case SCULL_IOCSQSET: if (! capable (CAP_SYS_ADMIN)) return -EPERM; retval = __get_user(scull_qset, (int __user *)arg); break; case SCULL_IOCTQSET: if (! capable (CAP_SYS_ADMIN)) return -EPERM; scull_qset = arg; break; case SCULL_IOCGQSET: retval = __put_user(scull_qset, (int __user *)arg); break; case SCULL_IOCQQSET: return scull_qset; case SCULL_IOCXQSET: if (! capable (CAP_SYS_ADMIN)) return -EPERM; tmp = scull_qset; retval = __get_user(scull_qset, (int __user *)arg); if (retval == 0) retval = put_user(tmp, (int __user *)arg); break; case SCULL_IOCHQSET: if (! capable (CAP_SYS_ADMIN)) return -EPERM; tmp = scull_qset; scull_qset = arg; return tmp; /* * The following two change the buffer size for scullpipe. * The scullpipe device uses this same ioctl method, just to * write less code. Actually, it's the same driver, isn't it? */ /* case SCULL_P_IOCTSIZE: scull_p_buffer = arg; break; case SCULL_P_IOCQSIZE: return scull_p_buffer; */ default: /* redundant, as cmd was checked against MAXNR */ return -ENOTTY; } return retval; } struct file_operations scull_fops = { //The function of system call should obtain the semaphore to protect the sharing the resource .owner = THIS_MODULE, .llseek = scull_llseek, .read = scull_read, .write = scull_write, .ioctl = scull_ioctl, .open = scull_open, .release = scull_release, }; /* * Finally, the module stuff */ /* * The cleanup function is used to handle initialization failures as well. 
* Thefore, it must be careful to work correctly even if some of the items * have not been initialized */ void scull_cleanup_module(void) { int i; dev_t devno = MKDEV(scull_major, scull_minor); /* Get rid of our char dev entries */ if (scull_devices) { for (i = 0; i < scull_nr_devs; i++) { scull_trim(scull_devices + i); cdev_del(&scull_devices[i].cdev); } kfree(scull_devices); } if (scull_cache) kmem_cache_destroy(scull_cache); #ifdef SCULL_DEBUG /* use proc only if debugging */ scull_remove_proc(); #endif remove_proc_entry("scullmem", NULL /* parent dir */); /* cleanup_module is never called if registering failed */ unregister_chrdev_region(devno, scull_nr_devs); /* and call the cleanup functions for friend devices */ } /* * Set up the char_dev structure for this device. */ static void scull_setup_cdev(struct scull_dev *dev, int index) { int err, devno = MKDEV(scull_major, scull_minor + index); cdev_init(&dev->cdev, &scull_fops); dev->cdev.owner = THIS_MODULE; dev->cdev.ops = &scull_fops; err = cdev_add (&dev->cdev, devno, 1); /* Fail gracefully if need be */ if (err) printk(KERN_NOTICE "Error %d adding scull%d", err, index); } int scull_init_module(void) { int result, i; dev_t dev = 0; /* * Get a range of minor numbers to work with, asking for a dynamic * major unless directed otherwise at load time. */ if (scull_major) { dev = MKDEV(scull_major, scull_minor); result = register_chrdev_region(dev, scull_nr_devs, "scull"); } else { result = alloc_chrdev_region(&dev, scull_minor, scull_nr_devs, "scull"); scull_major = MAJOR(dev); } if (result < 0) { printk(KERN_WARNING "scull: can't get major %d\n", scull_major); return result; } /* * allocate the devices -- we can't have them static, as the number * can be specified at load time */ scull_devices = kmalloc(scull_nr_devs * sizeof(struct scull_dev), GFP_KERNEL); if (!scull_devices) { result = -ENOMEM; goto fail; /* Make this more graceful */ } memset(scull_devices, 0, scull_nr_devs * sizeof(struct scull_dev)); /* Initialize each device. */ for (i = 0; i < scull_nr_devs; i++) { scull_devices[i].quantum = scull_quantum; scull_devices[i].qset = scull_qset; init_MUTEX(&scull_devices[i].sem); //The semaphore should be initialise before the scull device could be used scull_setup_cdev(&scull_devices[i], i); } /* At this point call the init function for any friend device */ dev = MKDEV(scull_major, scull_minor + scull_nr_devs); /*debug*/ create_proc_read_entry("scullmem", 0 /* default mode */, NULL /* parent dir */, scull_read_procmem, NULL /* client data */); scull_cache = kmem_cache_create("scullc",scull_quantum, 0,SLAB_HWCACHE_ALIGN,NULL); if (!scull_cache) { scull_cleanup_module(); return -ENOMEM; } return 0; /* succeed */ fail: scull_cleanup_module(); return result; } module_init(scull_init_module); module_exit(scull_cleanup_module);
//scull.h #ifndef _SCULL_H_ #define _SCULL_H_ #include <linux/ioctl.h> /* needed for the _IOW etc stuff used later */ #ifndef SCULL_MAJOR #define SCULL_MAJOR 0 /* dynamic major by default */ #endif #ifndef SCULL_NR_DEVS #define SCULL_NR_DEVS 4 /* scull0 through scull3 */ #endif /* * The bare device is a variable-length region of memory. * Use a linked list of indirect blocks. * * "scull_dev->data" points to an array of pointers, each * pointer refers to a memory area of SCULL_QUANTUM bytes. * * The array (quantum-set) is SCULL_QSET long. */ #ifndef SCULL_QUANTUM #define SCULL_QUANTUM 4000 #endif #ifndef SCULL_QSET #define SCULL_QSET 1000 #endif /* * Representation of scull quantum sets. */ struct scull_qset { void **data; struct scull_qset *next; }; struct scull_dev { struct scull_qset *data; /* Pointer to first quantum set */ int quantum; /* the current quantum size */ int qset; /* the current array size */ unsigned long size; /* amount of data stored here */ unsigned int access_key; /* used by sculluid and scullpriv */ struct semaphore sem; /* mutual exclusion semaphore */ struct cdev cdev; /* Char device structure */ }; /* * The different configurable parameters */ extern int scull_major; /* main.c */ extern int scull_nr_devs; extern int scull_quantum; extern int scull_qset; /* * Prototypes for shared functions */ int scull_access_init(dev_t dev); void scull_access_cleanup(void); int scull_trim(struct scull_dev *dev); ssize_t scull_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos); ssize_t scull_write(struct file *filp, const char __user *buf, size_t count, loff_t *f_pos); loff_t scull_llseek(struct file *filp, loff_t off, int whence); int scull_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg); /* * Ioctl definitions */ /* Use 'k' as magic number */ #define SCULL_IOC_MAGIC 'k' /* Please use a different 8-bit number in your code */ #define SCULL_IOCRESET _IO(SCULL_IOC_MAGIC, 0) /* * S means "Set" through a ptr, * T means "Tell" directly with the argument value * G means "Get": reply by setting through a pointer * Q means "Query": response is on the return value * X means "eXchange": switch G and S atomically * H means "sHift": switch T and Q atomically */ #define SCULL_IOCSQUANTUM _IOW(SCULL_IOC_MAGIC, 1, int) #define SCULL_IOCSQSET _IOW(SCULL_IOC_MAGIC, 2, int) #define SCULL_IOCTQUANTUM _IO(SCULL_IOC_MAGIC, 3) #define SCULL_IOCTQSET _IO(SCULL_IOC_MAGIC, 4) #define SCULL_IOCGQUANTUM _IOR(SCULL_IOC_MAGIC, 5, int) #define SCULL_IOCGQSET _IOR(SCULL_IOC_MAGIC, 6, int) #define SCULL_IOCQQUANTUM _IO(SCULL_IOC_MAGIC, 7) #define SCULL_IOCQQSET _IO(SCULL_IOC_MAGIC, 8) #define SCULL_IOCXQUANTUM _IOWR(SCULL_IOC_MAGIC, 9, int) #define SCULL_IOCXQSET _IOWR(SCULL_IOC_MAGIC,10, int) #define SCULL_IOCHQUANTUM _IO(SCULL_IOC_MAGIC, 11) #define SCULL_IOCHQSET _IO(SCULL_IOC_MAGIC, 12) /* * The other entities only have "Tell" and "Query", because they're * not printed in the book, and there's no need to have all six. * (The previous stuff was only there to show different ways to do it. */ #define SCULL_P_IOCTSIZE _IO(SCULL_IOC_MAGIC, 13) #define SCULL_P_IOCQSIZE _IO(SCULL_IOC_MAGIC, 14) /* ... more to come */ #define SCULL_IOC_MAXNR 14 #endif /* _SCULL_H_ */
Test program
//test.c
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>     /* _exit, sleep */
#include <stdlib.h>     /* exit */

int main(int argc, char **argv)
{
        int fd, size;
        char s[] = "Hello World";
        char buffer[80] = {"\0"};

        if ((fd = open("/dev/scull0", O_RDWR)) < 0) {
                printf("errno = %d\n", errno);
                exit(0);
        }
        size = write(fd, s, sizeof(s));
        printf("write in %d bytes\n", size);
        close(fd);

        fd = open("/dev/scull0", O_RDWR);
        size = read(fd, buffer, sizeof(buffer));
        printf("read out %d bytes\n", size);
        printf("%s\n", buffer);
        sleep(100);
        close(fd);
        return 0;
}
3. Memory pools
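This section has no listing of its own; as a hedged sketch of the mempool interface layered on a slab cache (the reserve of 4 objects, the object size, and all names are assumptions for illustration only):

//mempool_example.c (illustrative sketch only)
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

static struct kmem_cache *obj_cache;
static mempool_t *obj_pool;

static int pool_setup(void)
{
        obj_cache = kmem_cache_create("obj_cache", 4000, 0, 0, NULL);
        if (!obj_cache)
                return -ENOMEM;
        /* keep at least 4 pre-allocated objects in reserve */
        obj_pool = mempool_create(4, mempool_alloc_slab,
                                  mempool_free_slab, obj_cache);
        if (!obj_pool) {
                kmem_cache_destroy(obj_cache);
                return -ENOMEM;
        }
        return 0;
}

static void pool_use(void)
{
        void *obj = mempool_alloc(obj_pool, GFP_KERNEL);
        if (obj)
                mempool_free(obj, obj_pool);
}

static void pool_teardown(void)
{
        mempool_destroy(obj_pool);
        kmem_cache_destroy(obj_cache);
}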
4. get_free_page
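The scullp driver below allocates whole pages. The underlying call pair is shown here as a sketch, wrapped in illustrative helpers (order 0 means a single page; 2^order pages are allocated):

//page_alloc_example.c (sketch only)
#include <linux/mm.h>        /* __get_free_pages(), free_pages() */
#include <linux/errno.h>

static unsigned long page_buf;

static int page_alloc_example(int order)
{
        /* 2^order physically contiguous pages, returned as a kernel virtual address */
        page_buf = __get_free_pages(GFP_KERNEL, order);
        if (!page_buf)
                return -ENOMEM;
        return 0;
}

static void page_free_example(int order)
{
        free_pages(page_buf, order);   /* order must match the allocation */
}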
Example
Driver
//scullp.h #include <linux/ioctl.h> #include <linux/cdev.h> #define SCULLP_MAJOR 0 /* dynamic major by default */ #define SCULLP_DEVS 4 /* scullp0 through scullp3 */ /* * The bare device is a variable-length region of memory. * Use a linked list of indirect blocks. * * "scullp_dev->data" points to an array of pointers, each * pointer refers to a memory page. * * The array (quantum-set) is SCULLP_QSET long. */ #define SCULLP_ORDER 0 /* one page at a time */ #define SCULLP_QSET 500 struct scullp_dev { void **data; struct scullp_dev *next; /* next listitem */ int vmas; /* active mappings */ int order; /* the current allocation order */ int qset; /* the current array size */ size_t size; /* 32-bit will suffice */ struct semaphore sem; /* Mutual exclusion */ struct cdev cdev; }; extern struct scullp_dev *scullp_devices; extern struct file_operations scullp_fops; /* * The different configurable parameters */ extern int scullp_major; /* main.c */ extern int scullp_devs; extern int scullp_order; extern int scullp_qset; /* * Prototypes for shared functions */ int scullp_trim(struct scullp_dev *dev); struct scullp_dev *scullp_follow(struct scullp_dev *dev, int n); /* * Ioctl definitions */ /* Use 'K' as magic number */ #define SCULLP_IOC_MAGIC 'K' #define SCULLP_IOCRESET _IO(SCULLP_IOC_MAGIC, 0) /* * S means "Set" through a ptr, * T means "Tell" directly * G means "Get" (to a pointed var) * Q means "Query", response is on the return value * X means "eXchange": G and S atomically * H means "sHift": T and Q atomically */ #define SCULLP_IOCSORDER _IOW(SCULLP_IOC_MAGIC, 1, int) #define SCULLP_IOCTORDER _IO(SCULLP_IOC_MAGIC, 2) #define SCULLP_IOCGORDER _IOR(SCULLP_IOC_MAGIC, 3, int) #define SCULLP_IOCQORDER _IO(SCULLP_IOC_MAGIC, 4) #define SCULLP_IOCXORDER _IOWR(SCULLP_IOC_MAGIC, 5, int) #define SCULLP_IOCHORDER _IO(SCULLP_IOC_MAGIC, 6) #define SCULLP_IOCSQSET _IOW(SCULLP_IOC_MAGIC, 7, int) #define SCULLP_IOCTQSET _IO(SCULLP_IOC_MAGIC, 8) #define SCULLP_IOCGQSET _IOR(SCULLP_IOC_MAGIC, 9, int) #define SCULLP_IOCQQSET _IO(SCULLP_IOC_MAGIC, 10) #define SCULLP_IOCXQSET _IOWR(SCULLP_IOC_MAGIC,11, int) #define SCULLP_IOCHQSET _IO(SCULLP_IOC_MAGIC, 12) #define SCULLP_IOC_MAXNR 12
//scullp.c //#include <linux/config.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> /* printk() */ #include <linux/slab.h> /* kmalloc() */ #include <linux/fs.h> /* everything... */ #include <linux/errno.h> /* error codes */ #include <linux/types.h> /* size_t */ #include <linux/proc_fs.h> #include <linux/fcntl.h> /* O_ACCMODE */ #include <linux/aio.h> #include <asm/uaccess.h> #include "scullp.h" /* local definitions */ int scullp_major = SCULLP_MAJOR; int scullp_devs = SCULLP_DEVS; /* number of bare scullp devices */ int scullp_qset = SCULLP_QSET; int scullp_order = SCULLP_ORDER; module_param(scullp_major, int, 0); module_param(scullp_devs, int, 0); module_param(scullp_qset, int, 0); module_param(scullp_order, int, 0); MODULE_AUTHOR("Alessandro Rubini"); MODULE_LICENSE("Dual BSD/GPL"); struct scullp_dev *scullp_devices; /* allocated in scullp_init */ int scullp_trim(struct scullp_dev *dev); void scullp_cleanup(void); /* * Open and close */ int scullp_open (struct inode *inode, struct file *filp) { struct scullp_dev *dev; /* device information */ /* Find the device */ dev = container_of(inode->i_cdev, struct scullp_dev, cdev); /* now trim to 0 the length of the device if open was write-only */ if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) { if (down_interruptible (&dev->sem)) return -ERESTARTSYS; scullp_trim(dev); /* ignore errors */ up (&dev->sem); } /* and use filp->private_data to point to the device data */ filp->private_data = dev; return 0; /* success */ } int scullp_release (struct inode *inode, struct file *filp) { return 0; } /* * Follow the list */ struct scullp_dev *scullp_follow(struct scullp_dev *dev, int n) { while (n--) { if (!dev->next) { dev->next = kmalloc(sizeof(struct scullp_dev), GFP_KERNEL); memset(dev->next, 0, sizeof(struct scullp_dev)); } dev = dev->next; continue; } return dev; } /* * Data management: read and write */ ssize_t scullp_read (struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct scullp_dev *dev = filp->private_data; /* the first listitem */ struct scullp_dev *dptr; int quantum = PAGE_SIZE << dev->order; int qset = dev->qset; int itemsize = quantum * qset; /* how many bytes in the listitem */ int item, s_pos, q_pos, rest; ssize_t retval = 0; if (down_interruptible (&dev->sem)) return -ERESTARTSYS; if (*f_pos > dev->size) goto nothing; if (*f_pos + count > dev->size) count = dev->size - *f_pos; /* find listitem, qset index, and offset in the quantum */ item = ((long) *f_pos) / itemsize; rest = ((long) *f_pos) % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position (defined elsewhere) */ dptr = scullp_follow(dev, item); if (!dptr->data) goto nothing; /* don't fill holes */ if (!dptr->data[s_pos]) goto nothing; if (count > quantum - q_pos) count = quantum - q_pos; /* read only up to the end of this quantum */ if (copy_to_user (buf, dptr->data[s_pos]+q_pos, count)) { retval = -EFAULT; goto nothing; } up (&dev->sem); *f_pos += count; return count; nothing: up (&dev->sem); return retval; } ssize_t scullp_write (struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct scullp_dev *dev = filp->private_data; struct scullp_dev *dptr; int quantum = PAGE_SIZE << dev->order; int qset = dev->qset; int itemsize = quantum * qset; int item, s_pos, q_pos, rest; ssize_t retval = -ENOMEM; /* our most likely error */ if (down_interruptible (&dev->sem)) return -ERESTARTSYS; /* find listitem, qset index and offset in the 
quantum */ item = ((long) *f_pos) / itemsize; rest = ((long) *f_pos) % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position */ dptr = scullp_follow(dev, item); if (!dptr->data) { dptr->data = kmalloc(qset * sizeof(void *), GFP_KERNEL); if (!dptr->data) goto nomem; memset(dptr->data, 0, qset * sizeof(char *)); } /* Here's the allocation of a single quantum */ if (!dptr->data[s_pos]) { dptr->data[s_pos] = (void *)__get_free_pages(GFP_KERNEL, dptr->order); if (!dptr->data[s_pos]) goto nomem; memset(dptr->data[s_pos], 0, PAGE_SIZE << dptr->order); } if (count > quantum - q_pos) count = quantum - q_pos; /* write only up to the end of this quantum */ if (copy_from_user (dptr->data[s_pos]+q_pos, buf, count)) { retval = -EFAULT; goto nomem; } *f_pos += count; /* update the size */ if (dev->size < *f_pos) dev->size = *f_pos; up (&dev->sem); return count; nomem: up (&dev->sem); return retval; } /* * The ioctl() implementation */ int scullp_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int err = 0, ret = 0, tmp; /* don't even decode wrong cmds: better returning ENOTTY than EFAULT */ if (_IOC_TYPE(cmd) != SCULLP_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SCULLP_IOC_MAXNR) return -ENOTTY; /* * the type is a bitmask, and VERIFY_WRITE catches R/W * transfers. Note that the type is user-oriented, while * verify_area is kernel-oriented, so the concept of "read" and * "write" is reversed */ if (_IOC_DIR(cmd) & _IOC_READ) err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); if (err) return -EFAULT; switch(cmd) { case SCULLP_IOCRESET: scullp_qset = SCULLP_QSET; scullp_order = SCULLP_ORDER; break; case SCULLP_IOCSORDER: /* Set: arg points to the value */ ret = __get_user(scullp_order, (int __user *) arg); break; case SCULLP_IOCTORDER: /* Tell: arg is the value */ scullp_order = arg; break; case SCULLP_IOCGORDER: /* Get: arg is pointer to result */ ret = __put_user (scullp_order, (int __user *) arg); break; case SCULLP_IOCQORDER: /* Query: return it (it's positive) */ return scullp_order; case SCULLP_IOCXORDER: /* eXchange: use arg as pointer */ tmp = scullp_order; ret = __get_user(scullp_order, (int __user *) arg); if (ret == 0) ret = __put_user(tmp, (int __user *) arg); break; case SCULLP_IOCHORDER: /* sHift: like Tell + Query */ tmp = scullp_order; scullp_order = arg; return tmp; case SCULLP_IOCSQSET: ret = __get_user(scullp_qset, (int __user *) arg); break; case SCULLP_IOCTQSET: scullp_qset = arg; break; case SCULLP_IOCGQSET: ret = __put_user(scullp_qset, (int __user *)arg); break; case SCULLP_IOCQQSET: return scullp_qset; case SCULLP_IOCXQSET: tmp = scullp_qset; ret = __get_user(scullp_qset, (int __user *)arg); if (ret == 0) ret = __put_user(tmp, (int __user *)arg); break; case SCULLP_IOCHQSET: tmp = scullp_qset; scullp_qset = arg; return tmp; default: /* redundant, as cmd was checked against MAXNR */ return -ENOTTY; } return ret; } /* * The "extended" operations */ loff_t scullp_llseek (struct file *filp, loff_t off, int whence) { struct scullp_dev *dev = filp->private_data; long newpos; switch(whence) { case 0: /* SEEK_SET */ newpos = off; break; case 1: /* SEEK_CUR */ newpos = filp->f_pos + off; break; case 2: /* SEEK_END */ newpos = dev->size + off; break; default: /* can't happen */ return -EINVAL; } if (newpos<0) return -EINVAL; filp->f_pos = newpos; return newpos; } /* * The fops */ struct 
file_operations scullp_fops = { .owner = THIS_MODULE, .llseek = scullp_llseek, .read = scullp_read, .write = scullp_write, .ioctl = scullp_ioctl, // .mmap = scullp_mmap, .open = scullp_open, .release = scullp_release, // .aio_read = scullp_aio_read, // .aio_write = scullp_aio_write, }; int scullp_trim(struct scullp_dev *dev) { struct scullp_dev *next, *dptr; int qset = dev->qset; /* "dev" is not-null */ int i; if (dev->vmas) /* don't trim: there are active mappings */ return -EBUSY; for (dptr = dev; dptr; dptr = next) { /* all the list items */ if (dptr->data) { /* This code frees a whole quantum-set */ for (i = 0; i < qset; i++) if (dptr->data[i]) free_pages((unsigned long)(dptr->data[i]), dptr->order); kfree(dptr->data); dptr->data=NULL; } next=dptr->next; if (dptr != dev) kfree(dptr); /* all of them but the first */ } dev->size = 0; dev->qset = scullp_qset; dev->order = scullp_order; dev->next = NULL; return 0; } static void scullp_setup_cdev(struct scullp_dev *dev, int index) { int err, devno = MKDEV(scullp_major, index); cdev_init(&dev->cdev, &scullp_fops); dev->cdev.owner = THIS_MODULE; dev->cdev.ops = &scullp_fops; err = cdev_add (&dev->cdev, devno, 1); /* Fail gracefully if need be */ if (err) printk(KERN_NOTICE "Error %d adding scull%d", err, index); } /* * Finally, the module stuff */ int scullp_init(void) { int result, i; dev_t dev = MKDEV(scullp_major, 0); /* * Register your major, and accept a dynamic number. */ if (scullp_major) result = register_chrdev_region(dev, scullp_devs, "scullp"); else { result = alloc_chrdev_region(&dev, 0, scullp_devs, "scullp"); scullp_major = MAJOR(dev); } if (result < 0) return result; /* * allocate the devices -- we can't have them static, as the number * can be specified at load time */ scullp_devices = kmalloc(scullp_devs*sizeof (struct scullp_dev), GFP_KERNEL); if (!scullp_devices) { result = -ENOMEM; goto fail_malloc; } memset(scullp_devices, 0, scullp_devs*sizeof (struct scullp_dev)); for (i = 0; i < scullp_devs; i++) { scullp_devices[i].order = scullp_order; scullp_devices[i].qset = scullp_qset; sema_init (&scullp_devices[i].sem, 1); scullp_setup_cdev(scullp_devices + i, i); } #ifdef SCULLP_USE_PROC /* only when available */ create_proc_read_entry("scullpmem", 0, NULL, scullp_read_procmem, NULL); #endif return 0; /* succeed */ fail_malloc: unregister_chrdev_region(dev, scullp_devs); return result; } void scullp_cleanup(void) { int i; #ifdef SCULLP_USE_PROC remove_proc_entry("scullpmem", NULL); #endif for (i = 0; i < scullp_devs; i++) { cdev_del(&scullp_devices[i].cdev); scullp_trim(scullp_devices + i); } kfree(scullp_devices); unregister_chrdev_region(MKDEV (scullp_major, 0), scullp_devs); } module_init(scullp_init); module_exit(scullp_cleanup);
Test program
//test.c
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>     /* _exit, sleep */
#include <stdlib.h>     /* exit */

int main(int argc, char **argv)
{
        int fd, size;
        char s[] = "Hello World";
        char buffer[80] = {"\0"};

        if ((fd = open("/dev/scullp", O_RDWR)) < 0) {
                printf("errno = %d\n", errno);
                exit(0);
        }
        size = write(fd, s, sizeof(s));
        printf("write in %d bytes\n", size);
        close(fd);

        fd = open("/dev/scullp", O_RDWR);
        size = read(fd, buffer, sizeof(buffer));
        printf("read out %d bytes\n", size);
        printf("%s\n", buffer);
        sleep(100);
        close(fd);
        return 0;
}
5. vmalloc
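scullv, listed below, replaces __get_free_pages with vmalloc. The essential allocate/release pair looks like this; a sketch only, the size is arbitrary, and vmalloc memory is virtually contiguous but not necessarily physically contiguous, so it cannot be handed directly to DMA-capable devices:

//vmalloc_example.c (sketch only)
#include <linux/vmalloc.h>   /* vmalloc(), vfree() */
#include <linux/mm.h>        /* PAGE_SIZE */
#include <linux/errno.h>

static void *vbuf;

static int vmalloc_example(void)
{
        /* the kernel builds new page-table entries to map these pages
         * into a contiguous virtual range */
        vbuf = vmalloc(8 * PAGE_SIZE);
        if (!vbuf)
                return -ENOMEM;
        return 0;
}

static void vfree_example(void)
{
        vfree(vbuf);
}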
//scullv.h #include <linux/ioctl.h> #include <linux/cdev.h> #define SCULLP_MAJOR 0 /* dynamic major by default */ #define SCULLP_DEVS 4 /* scullp0 through scullp3 */ /* * The bare device is a variable-length region of memory. * Use a linked list of indirect blocks. * * "scullp_dev->data" points to an array of pointers, each * pointer refers to a memory page. * * The array (quantum-set) is SCULLP_QSET long. */ #define SCULLP_ORDER 0 /* one page at a time */ #define SCULLP_QSET 500 struct scullp_dev { void **data; struct scullp_dev *next; /* next listitem */ int vmas; /* active mappings */ int order; /* the current allocation order */ int qset; /* the current array size */ size_t size; /* 32-bit will suffice */ struct semaphore sem; /* Mutual exclusion */ struct cdev cdev; }; extern struct scullp_dev *scullp_devices; extern struct file_operations scullp_fops; /* * The different configurable parameters */ extern int scullp_major; /* main.c */ extern int scullp_devs; extern int scullp_order; extern int scullp_qset; /* * Prototypes for shared functions */ int scullp_trim(struct scullp_dev *dev); struct scullp_dev *scullp_follow(struct scullp_dev *dev, int n); /* * Ioctl definitions */ /* Use 'K' as magic number */ #define SCULLP_IOC_MAGIC 'K' #define SCULLP_IOCRESET _IO(SCULLP_IOC_MAGIC, 0) /* * S means "Set" through a ptr, * T means "Tell" directly * G means "Get" (to a pointed var) * Q means "Query", response is on the return value * X means "eXchange": G and S atomically * H means "sHift": T and Q atomically */ #define SCULLP_IOCSORDER _IOW(SCULLP_IOC_MAGIC, 1, int) #define SCULLP_IOCTORDER _IO(SCULLP_IOC_MAGIC, 2) #define SCULLP_IOCGORDER _IOR(SCULLP_IOC_MAGIC, 3, int) #define SCULLP_IOCQORDER _IO(SCULLP_IOC_MAGIC, 4) #define SCULLP_IOCXORDER _IOWR(SCULLP_IOC_MAGIC, 5, int) #define SCULLP_IOCHORDER _IO(SCULLP_IOC_MAGIC, 6) #define SCULLP_IOCSQSET _IOW(SCULLP_IOC_MAGIC, 7, int) #define SCULLP_IOCTQSET _IO(SCULLP_IOC_MAGIC, 8) #define SCULLP_IOCGQSET _IOR(SCULLP_IOC_MAGIC, 9, int) #define SCULLP_IOCQQSET _IO(SCULLP_IOC_MAGIC, 10) #define SCULLP_IOCXQSET _IOWR(SCULLP_IOC_MAGIC,11, int) #define SCULLP_IOCHQSET _IO(SCULLP_IOC_MAGIC, 12) #define SCULLP_IOC_MAXNR 12
//scullv.c //#include <linux/config.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/kernel.h> /* printk() */ #include <linux/slab.h> /* kmalloc() */ #include <linux/fs.h> /* everything... */ #include <linux/errno.h> /* error codes */ #include <linux/types.h> /* size_t */ #include <linux/proc_fs.h> #include <linux/fcntl.h> /* O_ACCMODE */ #include <linux/aio.h> #include <asm/uaccess.h> #include <linux/vmalloc.h> #include "scullv.h" /* local definitions */ int scullp_major = SCULLP_MAJOR; int scullp_devs = SCULLP_DEVS; /* number of bare scullp devices */ int scullp_qset = SCULLP_QSET; int scullp_order = SCULLP_ORDER; module_param(scullp_major, int, 0); module_param(scullp_devs, int, 0); module_param(scullp_qset, int, 0); module_param(scullp_order, int, 0); MODULE_AUTHOR("Alessandro Rubini"); MODULE_LICENSE("Dual BSD/GPL"); struct scullp_dev *scullp_devices; /* allocated in scullp_init */ int scullp_trim(struct scullp_dev *dev); void scullp_cleanup(void); /* * Open and close */ int scullp_open (struct inode *inode, struct file *filp) { struct scullp_dev *dev; /* device information */ /* Find the device */ dev = container_of(inode->i_cdev, struct scullp_dev, cdev); /* now trim to 0 the length of the device if open was write-only */ if ( (filp->f_flags & O_ACCMODE) == O_WRONLY) { if (down_interruptible (&dev->sem)) return -ERESTARTSYS; scullp_trim(dev); /* ignore errors */ up (&dev->sem); } /* and use filp->private_data to point to the device data */ filp->private_data = dev; return 0; /* success */ } int scullp_release (struct inode *inode, struct file *filp) { return 0; } /* * Follow the list */ struct scullp_dev *scullp_follow(struct scullp_dev *dev, int n) { while (n--) { if (!dev->next) { dev->next = kmalloc(sizeof(struct scullp_dev), GFP_KERNEL); memset(dev->next, 0, sizeof(struct scullp_dev)); } dev = dev->next; continue; } return dev; } /* * Data management: read and write */ ssize_t scullp_read (struct file *filp, char __user *buf, size_t count, loff_t *f_pos) { struct scullp_dev *dev = filp->private_data; /* the first listitem */ struct scullp_dev *dptr; int quantum = PAGE_SIZE << dev->order; int qset = dev->qset; int itemsize = quantum * qset; /* how many bytes in the listitem */ int item, s_pos, q_pos, rest; ssize_t retval = 0; if (down_interruptible (&dev->sem)) return -ERESTARTSYS; if (*f_pos > dev->size) goto nothing; if (*f_pos + count > dev->size) count = dev->size - *f_pos; /* find listitem, qset index, and offset in the quantum */ item = ((long) *f_pos) / itemsize; rest = ((long) *f_pos) % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position (defined elsewhere) */ dptr = scullp_follow(dev, item); if (!dptr->data) goto nothing; /* don't fill holes */ if (!dptr->data[s_pos]) goto nothing; if (count > quantum - q_pos) count = quantum - q_pos; /* read only up to the end of this quantum */ if (copy_to_user (buf, dptr->data[s_pos]+q_pos, count)) { retval = -EFAULT; goto nothing; } up (&dev->sem); *f_pos += count; return count; nothing: up (&dev->sem); return retval; } ssize_t scullp_write (struct file *filp, const char __user *buf, size_t count, loff_t *f_pos) { struct scullp_dev *dev = filp->private_data; struct scullp_dev *dptr; int quantum = PAGE_SIZE << dev->order; int qset = dev->qset; int itemsize = quantum * qset; int item, s_pos, q_pos, rest; ssize_t retval = -ENOMEM; /* our most likely error */ if (down_interruptible (&dev->sem)) return -ERESTARTSYS; /* find listitem, qset 
index and offset in the quantum */ item = ((long) *f_pos) / itemsize; rest = ((long) *f_pos) % itemsize; s_pos = rest / quantum; q_pos = rest % quantum; /* follow the list up to the right position */ dptr = scullp_follow(dev, item); if (!dptr->data) { dptr->data = kmalloc(qset * sizeof(void *), GFP_KERNEL); if (!dptr->data) goto nomem; memset(dptr->data, 0, qset * sizeof(char *)); } /* Here's the allocation of a single quantum */ /* if (!dptr->data[s_pos]) { dptr->data[s_pos] = (void *)__get_free_pages(GFP_KERNEL, dptr->order); if (!dptr->data[s_pos]) goto nomem; memset(dptr->data[s_pos], 0, PAGE_SIZE << dptr->order); } */ if (!dptr->data[s_pos]) { dptr->data[s_pos] = (void *)vmalloc(PAGE_SIZE << dptr->order); if (!dptr->data[s_pos]) goto nomem; memset(dptr->data[s_pos],0,PAGE_SIZE << dptr->order); } if (count > quantum - q_pos) count = quantum - q_pos; /* write only up to the end of this quantum */ if (copy_from_user (dptr->data[s_pos]+q_pos, buf, count)) { retval = -EFAULT; goto nomem; } *f_pos += count; /* update the size */ if (dev->size < *f_pos) dev->size = *f_pos; up (&dev->sem); return count; nomem: up (&dev->sem); return retval; } /* * The ioctl() implementation */ int scullp_ioctl (struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg) { int err = 0, ret = 0, tmp; /* don't even decode wrong cmds: better returning ENOTTY than EFAULT */ if (_IOC_TYPE(cmd) != SCULLP_IOC_MAGIC) return -ENOTTY; if (_IOC_NR(cmd) > SCULLP_IOC_MAXNR) return -ENOTTY; /* * the type is a bitmask, and VERIFY_WRITE catches R/W * transfers. Note that the type is user-oriented, while * verify_area is kernel-oriented, so the concept of "read" and * "write" is reversed */ if (_IOC_DIR(cmd) & _IOC_READ) err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); else if (_IOC_DIR(cmd) & _IOC_WRITE) err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); if (err) return -EFAULT; switch(cmd) { case SCULLP_IOCRESET: scullp_qset = SCULLP_QSET; scullp_order = SCULLP_ORDER; break; case SCULLP_IOCSORDER: /* Set: arg points to the value */ ret = __get_user(scullp_order, (int __user *) arg); break; case SCULLP_IOCTORDER: /* Tell: arg is the value */ scullp_order = arg; break; case SCULLP_IOCGORDER: /* Get: arg is pointer to result */ ret = __put_user (scullp_order, (int __user *) arg); break; case SCULLP_IOCQORDER: /* Query: return it (it's positive) */ return scullp_order; case SCULLP_IOCXORDER: /* eXchange: use arg as pointer */ tmp = scullp_order; ret = __get_user(scullp_order, (int __user *) arg); if (ret == 0) ret = __put_user(tmp, (int __user *) arg); break; case SCULLP_IOCHORDER: /* sHift: like Tell + Query */ tmp = scullp_order; scullp_order = arg; return tmp; case SCULLP_IOCSQSET: ret = __get_user(scullp_qset, (int __user *) arg); break; case SCULLP_IOCTQSET: scullp_qset = arg; break; case SCULLP_IOCGQSET: ret = __put_user(scullp_qset, (int __user *)arg); break; case SCULLP_IOCQQSET: return scullp_qset; case SCULLP_IOCXQSET: tmp = scullp_qset; ret = __get_user(scullp_qset, (int __user *)arg); if (ret == 0) ret = __put_user(tmp, (int __user *)arg); break; case SCULLP_IOCHQSET: tmp = scullp_qset; scullp_qset = arg; return tmp; default: /* redundant, as cmd was checked against MAXNR */ return -ENOTTY; } return ret; } /* * The "extended" operations */ loff_t scullp_llseek (struct file *filp, loff_t off, int whence) { struct scullp_dev *dev = filp->private_data; long newpos; switch(whence) { case 0: /* SEEK_SET */ newpos = off; break; case 1: /* SEEK_CUR */ newpos = filp->f_pos 
+ off; break; case 2: /* SEEK_END */ newpos = dev->size + off; break; default: /* can't happen */ return -EINVAL; } if (newpos<0) return -EINVAL; filp->f_pos = newpos; return newpos; } /* * The fops */ struct file_operations scullp_fops = { .owner = THIS_MODULE, .llseek = scullp_llseek, .read = scullp_read, .write = scullp_write, .ioctl = scullp_ioctl, // .mmap = scullp_mmap, .open = scullp_open, .release = scullp_release, // .aio_read = scullp_aio_read, // .aio_write = scullp_aio_write, }; int scullp_trim(struct scullp_dev *dev) { struct scullp_dev *next, *dptr; int qset = dev->qset; /* "dev" is not-null */ int i; if (dev->vmas) /* don't trim: there are active mappings */ return -EBUSY; for (dptr = dev; dptr; dptr = next) { /* all the list items */ if (dptr->data) { /* This code frees a whole quantum-set */ for (i = 0; i < qset; i++) if (dptr->data[i]) vfree(dptr->data[i]); kfree(dptr->data); dptr->data=NULL; } next=dptr->next; if (dptr != dev) kfree(dptr); /* all of them but the first */ } dev->size = 0; dev->qset = scullp_qset; dev->order = scullp_order; dev->next = NULL; return 0; } static void scullp_setup_cdev(struct scullp_dev *dev, int index) { int err, devno = MKDEV(scullp_major, index); cdev_init(&dev->cdev, &scullp_fops); dev->cdev.owner = THIS_MODULE; dev->cdev.ops = &scullp_fops; err = cdev_add (&dev->cdev, devno, 1); /* Fail gracefully if need be */ if (err) printk(KERN_NOTICE "Error %d adding scull%d", err, index); } /* * Finally, the module stuff */ int scullp_init(void) { int result, i; dev_t dev = MKDEV(scullp_major, 0); /* * Register your major, and accept a dynamic number. */ if (scullp_major) result = register_chrdev_region(dev, scullp_devs, "scullp"); else { result = alloc_chrdev_region(&dev, 0, scullp_devs, "scullp"); scullp_major = MAJOR(dev); } if (result < 0) return result; /* * allocate the devices -- we can't have them static, as the number * can be specified at load time */ scullp_devices = kmalloc(scullp_devs*sizeof (struct scullp_dev), GFP_KERNEL); if (!scullp_devices) { result = -ENOMEM; goto fail_malloc; } memset(scullp_devices, 0, scullp_devs*sizeof (struct scullp_dev)); for (i = 0; i < scullp_devs; i++) { scullp_devices[i].order = scullp_order; scullp_devices[i].qset = scullp_qset; sema_init (&scullp_devices[i].sem, 1); scullp_setup_cdev(scullp_devices + i, i); } #ifdef SCULLP_USE_PROC /* only when available */ create_proc_read_entry("scullpmem", 0, NULL, scullp_read_procmem, NULL); #endif return 0; /* succeed */ fail_malloc: unregister_chrdev_region(dev, scullp_devs); return result; } void scullp_cleanup(void) { int i; #ifdef SCULLP_USE_PROC remove_proc_entry("scullpmem", NULL); #endif for (i = 0; i < scullp_devs; i++) { cdev_del(&scullp_devices[i].cdev); scullp_trim(scullp_devices + i); } kfree(scullp_devices); unregister_chrdev_region(MKDEV (scullp_major, 0), scullp_devs); } module_init(scullp_init); module_exit(scullp_cleanup);
Test program
//test.c
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <unistd.h>     /* _exit, sleep */
#include <stdlib.h>     /* exit */

int main(int argc, char **argv)
{
        int fd, size;
        char s[] = "Hello World";
        char buffer[80] = {"\0"};

        if ((fd = open("/dev/scullp", O_RDWR)) < 0) {
                printf("errno = %d\n", errno);
                exit(0);
        }
        size = write(fd, s, sizeof(s));
        printf("write in %d bytes\n", size);
        close(fd);

        fd = open("/dev/scullp", O_RDWR);
        size = read(fd, buffer, sizeof(buffer));
        printf("read out %d bytes\n", size);
        printf("%s\n", buffer);
        sleep(100);
        close(fd);
        return 0;
}
Of the allocation methods above, some involve modifying page tables, while others return addresses that differ from the physical address by only a constant offset. Each method has its own appropriate use cases, so the choice should be made carefully when writing a real driver.