信号量(semaphore)是操作系统中最典型的用于同步和互斥的手段,信号量的值可以是 0、1 或者 n。信号量与操作系统的经典概念 PV 操作对应。
1 /* Please don't access any members of this structure directly 2 * 定義信號量:struct semaphore sem; 3 */ 4 struct semaphore { 5 raw_spinlock_t lock; 6 unsigned int count; 7 struct list_head wait_list; 8 };
1 /** 2 * 初始化信號量 3 * 該函數初始化信號量,並設置信號量 sem 的值爲 val 4 */ 5 static inline void sema_init(struct semaphore *sem, int val) 6 { 7 static struct lock_class_key __key; 8 *sem = (struct semaphore) __SEMAPHORE_INITIALIZER(*sem, val); 9 lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0); 10 }
1 /** 2 * 得到信號量: 3 * 用於得到信號量 sem,它會致使睡眠,所以不能再中斷上下文中使用 4 * 進入睡眠狀態的進程不能被信號打斷 5 */ 6 extern void down(struct semaphore *sem);
1 /** 2 * 得到信號量: 3 * 與 down 函數相似,不一樣的是進入睡眠狀態的進程能被信號打斷, 4 * 信號也會致使該函數返回,這時候函數的返回值非 0 5 * 在使用此函數的時候,對返回值通常會進行檢查,若是非 0,一般當即返回 -ERESTARTSYS 6 * if(down_interruptible(&sem)) 7 * return -ERESTARTSYS; 8 */ 9 extern int __must_check down_interruptible(struct semaphore *sem);
1 /** 2 * 得到信號量: 3 * 嘗試得到信號量,若是可以馬上得到,它就得到信號量並返回 0,不然返回非 0 值 4 * 它不會致使調用者睡眠,能夠再中斷上下文中使用 5 */ 6 extern int __must_check down_trylock(struct semaphore *sem);
1 /** 2 * 釋放信號量,喚醒等待者 3 */ 4 extern void up(struct semaphore *sem);
信号量作为一种可选的互斥手段,可以保护临界区,使用方式与自旋锁类似。与自旋锁相同的是,只有得到信号量的进程才能执行临界区的代码。
与自旋锁不同的是,当获取不到信号量时,进程不会原地打转,而是进入休眠等待状态。
用作互斥时,信号量一般这样使用:
因为内核更倾向于直接使用 mutex 作为互斥手段,所以不推荐如此使用信号量。
信号量更适合用于同步,比如带具体数值的生产者/消费者问题。
一个进程 A 执行 down() 等待信号量,另一个进程 B 执行 up() 释放信号量,这样进程 A 就同步等待了进程 B。
1 /** 2 * 定義互斥體:struct mutex my_mutex; 3 */ 4 struct mutex { 5 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 6 atomic_t count; 7 spinlock_t wait_lock; 8 struct list_head wait_list; 9 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_MUTEX_SPIN_ON_OWNER) 10 struct task_struct *owner; 11 #endif 12 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER 13 struct optimistic_spin_queue osq; /* Spinner MCS lock */ 14 #endif 15 #ifdef CONFIG_DEBUG_MUTEXES 16 void *magic; 17 #endif 18 #ifdef CONFIG_DEBUG_LOCK_ALLOC 19 struct lockdep_map dep_map; 20 #endif 21 };
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Usage: mutex_init(&my_mutex);
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
# define mutex_init(mutex)				\
do {							\
	static struct lock_class_key __key;		\
							\
	__mutex_init((mutex), #mutex, &__key);		\
} while (0)
1 /** 2 * 獲取互斥體 3 * 引發的睡眠不能被信號打斷。 4 */ 5 extern void mutex_lock(struct mutex *lock); 6 /** 7 * 獲取互斥體 8 * 引發的睡眠可被信號打斷。 9 */ 10 extern int __must_check mutex_lock_interruptible(struct mutex *lock);
1 /* 2 * 獲取互斥體 3 * 嘗試得到 mutex,獲取不到 mutex 時不會引發進程睡眠 4 * NOTE: mutex_trylock() follows the spin_trylock() convention, 5 * not the down_trylock() convention! 6 * 7 * Returns 1 if the mutex has been acquired successfully, and 0 on contention. 8 */ 9 extern int mutex_trylock(struct mutex *lock);
1 /** 釋放互斥體 */ 2 extern void mutex_unlock(struct mutex *lock);
用法:
/*
 * Typical mutex usage pattern:
 *
 *	struct mutex my_mutex;		define the mutex
 *	mutex_init(&my_mutex);		initialize it (unlocked)
 *	mutex_lock(&my_mutex);		acquire it
 *	... access the critical resource ...
 *	mutex_unlock(&my_mutex);	release it
 */
完成量(completion)用于一个执行单元等待另一个执行单元执行完某事。
1 /** 2 * 定義完成量:struct completion my_completion; 3 */ 4 struct completion { 5 unsigned int done; 6 wait_queue_head_t wait; 7 };
1 /** 2 * init_completion - Initialize a dynamically allocated completion 3 * 初始化完成量的值爲 0(即沒有完成的狀態) 4 * @x: pointer to completion structure that is to be initialized 5 * 6 * This inline function will initialize a dynamically created completion 7 * structure. 8 */ 9 static inline void init_completion(struct completion *x) 10 { 11 x->done = 0; 12 init_waitqueue_head(&x->wait); 13 }
1 /** 2 * reinit_completion - reinitialize a completion structure 3 * 從新初始化完成量的值爲 0(即沒有完成的狀態) 4 * @x: pointer to completion structure that is to be reinitialized 5 * 6 * This inline function should be used to reinitialize a completion structure so it can 7 * be reused. This is especially important after complete_all() is used. 8 */ 9 static inline void reinit_completion(struct completion *x) 10 { 11 x->done = 0; 12 }
/** Sleep until the completion is signalled. */
extern void wait_for_completion(struct completion *);

/** Signal the completion: wake exactly one waiting execution unit. */
extern void complete(struct completion *);
/** Signal the completion: wake all units waiting on it. */
extern void complete_all(struct completion *);
在 globalmem 的读写函数中,要调用 copy_from_user 等可能引起阻塞的函数,所以不能使用自旋锁,宜使用互斥体。
1 #include <linux/module.h> 2 #include <linux/fs.h> 3 #include <linux/init.h> 4 #include <linux/cdev.h> 5 #include <linux/slab.h> 6 #include <linux/uaccess.h> 7 #include <linux/mutex.h> 8 9 #define GLOBALMEM_SIZE 0x1000 10 //#define MEM_CLEAR 0X1 11 #define GLOBALMEM_MAGIC 'g' 12 #define MEM_CLEAR _IO(GLOBALMEM_MAGIC, 0) 13 #define GLOBALMEM_MAJOR 230 14 #define DEVICE_NUMBER 10 15 16 static int globalmem_major = GLOBALMEM_MAJOR; 17 module_param(globalmem_major, int, S_IRUGO); 18 19 struct globalmem_dev { 20 struct cdev cdev; 21 unsigned char mem[GLOBALMEM_SIZE]; 22 struct mutex mutex; 23 }; 24 25 struct globalmem_dev *globalmem_devp; 26 27 /** 28 * 這裏涉及到私有數據的定義,大多數遵循將文件私有數據 pirvate_data 指向設備結構體, 29 * 再用 read write llseek ioctl 等函數經過 private_data 訪問設備結構體。 30 * 對於此驅動而言,私有數據的設置是在 open 函數中完成的 31 */ 32 static int globalmem_open(struct inode *inode, struct file *filp) 33 { 34 /** 35 * NOTA: 36 * container_of 的做用是經過結構體成員的指針找到對應結構體的指針。 37 * 第一個參數是結構體成員的指針 38 * 第二個參數是整個結構體的類型 39 * 第三個參數爲傳入的第一個參數(即結構體成員)的類型 40 * container_of 返回值爲整個結構體指針 41 */ 42 struct globalmem_dev *dev = container_of(inode->i_cdev, struct globalmem_dev, cdev); 43 filp->private_data = dev; 44 return 0; 45 } 46 47 static int globalmem_release(struct inode *inode, struct file *filp) 48 { 49 return 0; 50 } 51 52 static long globalmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 53 { 54 struct globalmem_dev *dev = filp->private_data; 55 56 switch(cmd){ 57 case MEM_CLEAR: 58 mutex_lock(&dev->mutex); 59 memset(dev->mem, 0, GLOBALMEM_SIZE); 60 printk(KERN_INFO "globalmem is set to zero\n"); 61 mutex_unlock(&dev->mutex); 62 break; 63 default: 64 return -EINVAL; 65 } 66 67 return 0; 68 } 69 70 static loff_t globalmem_llseek(struct file *filp, loff_t offset, int orig) 71 { 72 loff_t ret = 0; 73 switch(orig) { 74 case 0: /** 從文件開頭位置 seek */ 75 if(offset < 0){ 76 ret = -EINVAL; 77 break; 78 } 79 if((unsigned int)offset > GLOBALMEM_SIZE){ 80 ret = -EINVAL; 81 break; 82 } 83 filp->f_pos = (unsigned 
int)offset; 84 ret = filp->f_pos; 85 break; 86 case 1: /** 從文件當前位置開始 seek */ 87 if((filp->f_pos + offset) > GLOBALMEM_SIZE){ 88 ret = -EINVAL; 89 break; 90 } 91 if((filp->f_pos + offset) < 0){ 92 ret = -EINVAL; 93 break; 94 } 95 filp->f_pos += offset; 96 ret = filp->f_pos; 97 break; 98 default: 99 ret = -EINVAL; 100 break; 101 } 102 103 return ret; 104 } 105 106 static ssize_t globalmem_write(struct file *filp, const char __user *buf, size_t size, loff_t *ppos) 107 { 108 unsigned long p = *ppos; 109 unsigned int count = size; 110 int ret = 0; 111 struct globalmem_dev *dev = filp->private_data; 112 113 if(p >= GLOBALMEM_SIZE) 114 return 0; 115 if(count > GLOBALMEM_SIZE - p) 116 count = GLOBALMEM_SIZE - p; 117 118 mutex_lock(&dev->mutex); 119 if(copy_from_user(dev->mem + p, buf, count)) 120 ret = -EFAULT; 121 else { 122 123 *ppos += count; 124 ret = count; 125 printk(KERN_INFO "written %u bytes(s) from %lu\n", count, p); 126 } 127 mutex_unlock(&dev->mutex); 128 return ret; 129 } 130 131 /** 132 * *ppos 是要讀的位置相對於文件開頭的偏移,若是該偏移大於或等於 GLOBALMEM_SIZE,意味着已經獨到文件末尾 133 */ 134 static ssize_t globalmem_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos) 135 { 136 unsigned long p = *ppos; 137 unsigned int count = size; 138 int ret = 0; 139 struct globalmem_dev *dev = filp->private_data; 140 141 if(p >= GLOBALMEM_SIZE) 142 return 0; 143 if(count > GLOBALMEM_SIZE - p) 144 count = GLOBALMEM_SIZE - p; 145 146 mutex_lock(&dev->mutex); 147 if(copy_to_user(buf, dev->mem + p, count)) { 148 ret = -EFAULT; 149 } else { 150 *ppos += count; 151 ret = count; 152 printk(KERN_INFO "read %u bytes(s) from %lu\n", count, p); 153 } 154 155 mutex_unlock(&dev->mutex); 156 return ret; 157 } 158 159 static const struct file_operations globalmem_fops = { 160 .owner = THIS_MODULE, 161 .llseek = globalmem_llseek, 162 .read = globalmem_read, 163 .write = globalmem_write, 164 .unlocked_ioctl = globalmem_ioctl, 165 .open = globalmem_open, 166 .release = globalmem_release, 167 }; 168 169 170 
/** 171 * @brief globalmem_setup_cdev 172 * 173 * @param dev 174 * @param index 次設備號 175 */ 176 static void globalmem_setup_cdev(struct globalmem_dev *dev, int index) 177 { 178 int err; 179 int devno = MKDEV(globalmem_major, index); 180 181 /** 使用 cdev_init 便是靜態初始化了 cdev */ 182 cdev_init(&dev->cdev, &globalmem_fops); 183 dev->cdev.owner = THIS_MODULE; 184 185 /** 設備編號範圍設置爲1,表示咱們只申請了一個設備 */ 186 err = cdev_add(&dev->cdev, devno, 1); 187 if(err) 188 printk(KERN_NOTICE "Error %d adding globalmem%d\n", err, index); 189 } 190 191 static int __init globalmem_init(void) 192 { 193 int ret; 194 int i; 195 dev_t devno = MKDEV(globalmem_major, 0); 196 197 if(globalmem_major) 198 ret = register_chrdev_region(devno, DEVICE_NUMBER, "globalmem"); 199 else { 200 ret = alloc_chrdev_region(&devno, 0, DEVICE_NUMBER, "gobalmem"); 201 globalmem_major = MAJOR(devno); 202 } 203 204 if(ret < 0) 205 return ret; 206 207 globalmem_devp = kzalloc(sizeof(struct globalmem_dev), GFP_KERNEL); 208 if(!globalmem_devp){ 209 ret = -ENOMEM; 210 goto fail_malloc; 211 } 212 213 mutex_init(&globalmem_devp->mutex); 214 for(i = 0; i < DEVICE_NUMBER; i++){ 215 globalmem_setup_cdev(globalmem_devp + i, i); 216 } 217 218 fail_malloc: 219 unregister_chrdev_region(devno, 1); 220 return ret; 221 } 222 223 static void __exit globalmem_exit(void) 224 { 225 int i; 226 for(i = 0; i < DEVICE_NUMBER; i++) { 227 cdev_del(&(globalmem_devp + i)->cdev); 228 } 229 kfree(globalmem_devp); 230 unregister_chrdev_region(MKDEV(globalmem_major, 0), 1); 231 } 232 233 module_init(globalmem_init); 234 module_exit(globalmem_exit);