The Linux sysfs Filesystem
1: Introduction
In the device model, the sysfs filesystem is used to represent the structure of devices, mapping the device hierarchy into user space. User space can modify attribute files in sysfs to change the corresponding device attribute values. In this article we analyze in detail how sysfs is implemented.
2: Initialization and mounting of sysfs
The sysfs filesystem is initialized in sysfs_init(). The code is as follows:
int __init sysfs_init(void)
{
    int err = -ENOMEM;

    /* create a cache for allocating sysfs_dirent structures */
    sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
                                         sizeof(struct sysfs_dirent),
                                         0, 0, NULL);
    if (!sysfs_dir_cachep)
        goto out;

    err = sysfs_inode_init();
    if (err)
        goto out_err;

    /* register the sysfs filesystem */
    err = register_filesystem(&sysfs_fs_type);
    if (!err) {
        /* mount the sysfs filesystem */
        sysfs_mount = kern_mount(&sysfs_fs_type);
        if (IS_ERR(sysfs_mount)) {
            printk(KERN_ERR "sysfs: could not mount!\n");
            err = PTR_ERR(sysfs_mount);
            sysfs_mount = NULL;
            unregister_filesystem(&sysfs_fs_type);
            goto out_err;
        }
    } else
        goto out_err;
out:
    return err;
out_err:
    kmem_cache_destroy(sysfs_dir_cachep);
    sysfs_dir_cachep = NULL;
    goto out;
}
Each kobject corresponds to a directory in sysfs, and each attribute of a kobject corresponds to a file in that directory.
struct sysfs_dirent is the glue used to convert between a kobject and a dentry. Their relationship is as follows (the original figure is omitted here): the d_fsdata field of a dentry points to the sysfs_dirent that the node represents; sysfs_dirent.s_parent points to its parent sysfs_dirent; sysfs_dirent.s_sibling links a node to its siblings; and sysfs_dirent.s_dir.children points to the head of its list of children.
From this layout it is clear that, to walk the children of a node, we only need to find sysfs_dirent.s_dir.children and then follow the s_sibling field of each child.
Of course, sometimes the dentry that belongs to a struct sysfs_dirent also has to be derived; we will analyze that when we meet it in the code. For reference, the union members embedded in struct sysfs_dirent are sketched below.
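These helper structures look roughly as follows in a 2.6.25-era kernel (sketched from that kernel's fs/sysfs/sysfs.h; treat the exact layout as an assumption, since it varies between versions):

/* sketched from fs/sysfs/sysfs.h of a 2.6.25-era kernel */
struct sysfs_elem_dir {
    struct kobject      *kobj;
    /* children list starts here and goes through sd->s_sibling */
    struct sysfs_dirent *children;
};

struct sysfs_elem_symlink {
    struct sysfs_dirent *target_sd;
};

struct sysfs_elem_attr {
    struct attribute         *attr;
    struct sysfs_open_dirent *open;
};

struct sysfs_elem_bin_attr {
    struct bin_attribute *bin_attr;
};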
The file_system_type of the sysfs filesystem is defined as follows:
static struct file_system_type sysfs_fs_type = {
    .name    = "sysfs",
    .get_sb  = sysfs_get_sb,
    .kill_sb = kill_anon_super,
};
From our earlier analysis of the VFS, we know that sys_mount() eventually calls the get_sb function of struct file_system_type to mount the filesystem. Its code is as follows:
static int sysfs_get_sb(struct file_system_type *fs_type,
        int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
    return get_sb_single(fs_type, flags, data, sysfs_fill_super, mnt);
}
The code of get_sb_single() was covered earlier. It fills in the super_block, as well as the dentry and inode of the mount point, in the callback sysfs_fill_super(). The code is as follows:
static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
{
    struct inode *inode;
    struct dentry *root;

    sb->s_blocksize = PAGE_CACHE_SIZE;
    sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
    sb->s_magic = SYSFS_MAGIC;
    sb->s_op = &sysfs_ops;
    sb->s_time_gran = 1;
    sysfs_sb = sb;

    /* get root inode, initialize and unlock it */
    inode = sysfs_get_inode(&sysfs_root);
    if (!inode) {
        pr_debug("sysfs: could not get root inode\n");
        return -ENOMEM;
    }

    /* instantiate and link root dentry */
    root = d_alloc_root(inode);
    if (!root) {
        pr_debug("%s: could not get root dentry!\n", __FUNCTION__);
        iput(inode);
        return -ENOMEM;
    }
    /* associate sysfs_root with the root dentry */
    root->d_fsdata = &sysfs_root;
    sb->s_root = root;
    return 0;
}
Note two global variables here: sysfs_sb holds the super_block of the sysfs filesystem, and sysfs_root is the struct sysfs_dirent of the sysfs root directory; sysfs_root is defined statically, as sketched below.
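For reference, in a 2.6.25-era source tree sysfs_root is a statically initialized sysfs_dirent roughly like this (sketched from that era's fs/sysfs/mount.c; field values are an assumption and may differ slightly between versions):

/* sketched from fs/sysfs/mount.c of a 2.6.25-era kernel */
struct sysfs_dirent sysfs_root = {
    .s_name  = "",
    .s_count = ATOMIC_INIT(1),
    .s_flags = SYSFS_DIR,
    .s_mode  = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
    .s_ino   = 1,
};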
sysfs_get_inode(&sysfs_root) derives the inode corresponding to sysfs_root. The code is as follows:
struct inode *sysfs_get_inode(struct sysfs_dirent *sd)
{
    struct inode *inode;

    /* look up the inode hashed by the super_block and sd->s_ino;
     * allocate a new one if it does not exist yet */
    inode = iget_locked(sysfs_sb, sd->s_ino);
    /* initialize the newly allocated inode */
    if (inode && (inode->i_state & I_NEW))
        sysfs_init_inode(sd, inode);
    return inode;
}
First, using the sysfs super_block and the s_ino member of the struct sysfs_dirent as the hash key, it searches the inode hash table for the corresponding inode. If the inode is not in the hash table, a new one is allocated and inserted into it. Then sysfs_init_inode() is called to initialize the new inode. Clearly, no inode exists yet at mount time, so we are guaranteed to enter sysfs_init_inode(). The code is as follows:
static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
    struct bin_attribute *bin_attr;

    inode->i_blocks = 0;
    inode->i_mapping->a_ops = &sysfs_aops;
    inode->i_mapping->backing_dev_info = &sysfs_backing_dev_info;
    inode->i_op = &sysfs_inode_operations;
    inode->i_ino = sd->s_ino;
    lockdep_set_class(&inode->i_mutex, &sysfs_inode_imutex_key);

    if (sd->s_iattr) {
        /* sysfs_dirent has non-default attributes
         * get them for the new inode from persistent copy
         * in sysfs_dirent
         */
        set_inode_attr(inode, sd->s_iattr);
    } else
        set_default_inode_attr(inode, sd->s_mode);

    /* initialize inode according to type */
    switch (sysfs_type(sd)) {
    case SYSFS_DIR:
        inode->i_op = &sysfs_dir_inode_operations;
        inode->i_fop = &sysfs_dir_operations;
        inode->i_nlink = sysfs_count_nlink(sd);
        break;
    case SYSFS_KOBJ_ATTR:
        inode->i_size = PAGE_SIZE;
        inode->i_fop = &sysfs_file_operations;
        break;
    case SYSFS_KOBJ_BIN_ATTR:
        bin_attr = sd->s_bin_attr.bin_attr;
        inode->i_size = bin_attr->size;
        inode->i_fop = &bin_fops;
        break;
    case SYSFS_KOBJ_LINK:
        inode->i_op = &sysfs_symlink_inode_operations;
        break;
    default:
        BUG();
    }

    unlock_new_inode(inode);
}
Here we can see the various operation tables used by the sysfs filesystem, selected according to the result of sysfs_type(); the type constants it tests are sketched below.
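sysfs_type() simply masks the type bits out of sd->s_flags. In a 2.6.25-era fs/sysfs/sysfs.h the constants are defined roughly as follows (sketched from memory; take the exact values as an assumption):

/* sketched from fs/sysfs/sysfs.h of a 2.6.25-era kernel */
#define SYSFS_TYPE_MASK      0x00ff
#define SYSFS_DIR            0x0001
#define SYSFS_KOBJ_ATTR      0x0002
#define SYSFS_KOBJ_BIN_ATTR  0x0004
#define SYSFS_KOBJ_LINK      0x0008

static inline unsigned int sysfs_type(struct sysfs_dirent *sd)
{
    return sd->s_flags & SYSFS_TYPE_MASK;
}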
How does the sysfs filesystem decide whether a file exists in a given directory?
From the earlier filesystem analysis we know that file lookup is ultimately performed by inode->i_op->lookup(). In sysfs this function is sysfs_lookup(). The code is as follows:
static struct dentry *sysfs_lookup(struct inode *dir, struct dentry *dentry,
                                   struct nameidata *nd)
{
    struct dentry *ret = NULL;
    /* get the sysfs_dirent of the parent node */
    struct sysfs_dirent *parent_sd = dentry->d_parent->d_fsdata;
    struct sysfs_dirent *sd;
    struct inode *inode;

    mutex_lock(&sysfs_mutex);

    /* does the parent's sysfs_dirent have a matching child? */
    sd = sysfs_find_dirent(parent_sd, dentry->d_name.name);

    /* no such entry: the node does not exist */
    if (!sd) {
        ret = ERR_PTR(-ENOENT);
        goto out_unlock;
    }

    /* attach dentry and inode: the node exists, so create
     * an inode for it */
    inode = sysfs_get_inode(sd);
    if (!inode) {
        ret = ERR_PTR(-ENOMEM);
        goto out_unlock;
    }

    /* instantiate and hash dentry */
    dentry->d_op = &sysfs_dentry_ops;
    /* associate the dentry with the sysfs_dirent */
    dentry->d_fsdata = sysfs_get(sd);
    d_instantiate(dentry, inode);
    d_rehash(dentry);

out_unlock:
    mutex_unlock(&sysfs_mutex);
    return ret;
}
As we can see, the lookup is delegated to the corresponding sysfs_dirent. When the device model creates a directory or a file, it does not create a dentry or an inode; it only builds the sysfs_dirent structure. If the lookup finds such a structure, an inode is created for it on the spot and attached to the dentry.
sysfs_find_dirent() is shown below:
struct sysfs_dirent *sysfs_find_dirent(struct sysfs_dirent *parent_sd,
                                       const unsigned char *name)
{
    struct sysfs_dirent *sd;

    for (sd = parent_sd->s_dir.children; sd; sd = sd->s_sibling)
        if (!strcmp(sd->s_name, name))
            return sd;
    return NULL;
}
The search method is exactly the one we described above when analyzing the sysfs_dirent structure. By this point the rough outline of sysfs has emerged. ^_^ Next we analyze how directories are created in the sysfs filesystem.
3: Creating directories in the sysfs filesystem
In the Linux device model, every registered kobject gets a directory created for it. The detailed flow will be covered in the device model analysis. The interface for creating a directory is sysfs_create_dir(). The code is as follows:
int sysfs_create_dir(struct kobject *kobj)
{
    struct sysfs_dirent *parent_sd, *sd;
    int error = 0;

    BUG_ON(!kobj);

    /* if the kobject has no parent, its parent becomes the sysfs
     * root directory, sysfs_root */
    if (kobj->parent)
        parent_sd = kobj->parent->sd;
    else
        parent_sd = &sysfs_root;

    /* create the directory */
    error = create_dir(kobj, parent_sd, kobject_name(kobj), &sd);
    /* make kobj->sd point to the corresponding sysfs_dirent */
    if (!error)
        kobj->sd = sd;
    return error;
}
Here the parent directory of the node is determined first, and then create_dir() generates the node under that parent. The code is as follows:
static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
                      const char *name, struct sysfs_dirent **p_sd)
{
    /* mode of the directory */
    umode_t mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
    struct sysfs_addrm_cxt acxt;
    struct sysfs_dirent *sd;
    int rc;

    /* allocate and initialize a sysfs_dirent */
    sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
    if (!sd)
        return -ENOMEM;
    /* initialize the sd->s_dir.kobj field */
    sd->s_dir.kobj = kobj;

    /* link in */
    /* acxt is a temporary context holding information about the
     * parent: sysfs_addrm_start() sets acxt->parent_sd to the
     * parent's sysfs_dirent and acxt->parent_inode to the
     * parent's inode */
    sysfs_addrm_start(&acxt, parent_sd);
    /* set sd->s_parent and insert sd into the parent's children
     * list, ordered by inode number */
    rc = sysfs_add_one(&acxt, sd);
    sysfs_addrm_finish(&acxt);

    if (rc == 0)
        *p_sd = sd;
    else
        sysfs_put(sd);

    return rc;
}
Here a sysfs_dirent is created for the child node, its parent field is set, and it is linked into the parent's children list. After that, the child can be found under its parent directory through the filesystem. A minimal usage sketch follows.
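Drivers normally do not call sysfs_create_dir() directly; it runs as part of kobject registration. As a rough illustration (a sketch modeled on the kernel's kobject sample code, not taken from the article's source), a module can create a directory under /sys/kernel like this; kobject_create_and_add() internally ends up in sysfs_create_dir():

#include <linux/kobject.h>
#include <linux/module.h>

static struct kobject *example_kobj;

static int __init example_init(void)
{
    /* creates the directory /sys/kernel/kobject_example;
     * kernel_kobj is the kobject behind /sys/kernel */
    example_kobj = kobject_create_and_add("kobject_example", kernel_kobj);
    if (!example_kobj)
        return -ENOMEM;
    return 0;
}

static void __exit example_exit(void)
{
    /* drops the last reference and removes the directory */
    kobject_put(example_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");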
4: Creating regular attribute files in sysfs
Each attribute of a kobject corresponds to a file, named after the attribute, inside the kobject's directory in sysfs. The interface for creating a regular attribute is sysfs_create_file(). The code is as follows:
int sysfs_create_file(struct kobject *kobj, const struct attribute *attr)
{
    BUG_ON(!kobj || !kobj->sd || !attr);

    /* kobj->sd: the struct sysfs_dirent of the directory that
     * represents the kobject */
    return sysfs_add_file(kobj->sd, attr, SYSFS_KOBJ_ATTR);
}
It ends up calling sysfs_add_file(). The parameter attr is the attribute for which the file is to be created.
int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
                   int type)
{
    /* mode of the file */
    umode_t mode = (attr->mode & S_IALLUGO) | S_IFREG;
    struct sysfs_addrm_cxt acxt;
    struct sysfs_dirent *sd;
    int rc;

    /* create a new sysfs_dirent whose name is attr->name,
     * i.e. the name of the attribute */
    sd = sysfs_new_dirent(attr->name, mode, type);
    if (!sd)
        return -ENOMEM;
    /* store the attribute */
    sd->s_attr.attr = (void *)attr;

    /* link the child's struct sysfs_dirent to its parent */
    sysfs_addrm_start(&acxt, dir_sd);
    rc = sysfs_add_one(&acxt, sd);
    sysfs_addrm_finish(&acxt);

    if (rc)
        sysfs_put(sd);
    return rc;
}
This flow is mostly the same as directory creation. The only difference is the parent: when creating a directory the parent is the node one level up, whereas when creating a file the parent is the sysfs_dirent of the kobject itself.
After this, the file shows up in the kobject's directory. ^_^ A driver-side sketch of defining such an attribute follows.
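As a rough illustration of the caller's side (a sketch, not from the article's source; it assumes the 2.6.25-era struct kobj_attribute API from linux/kobject.h), an attribute with show/store methods can be defined and exposed like this:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static int foo;

/* called through sysfs_ops->show when the file is read */
static ssize_t foo_show(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
    return sprintf(buf, "%d\n", foo);
}

/* called through sysfs_ops->store when the file is written */
static ssize_t foo_store(struct kobject *kobj, struct kobj_attribute *attr,
                         const char *buf, size_t count)
{
    sscanf(buf, "%d", &foo);
    return count;
}

static struct kobj_attribute foo_attribute =
    __ATTR(foo, 0644, foo_show, foo_store);

/* somewhere after the kobject has been registered:
 *     error = sysfs_create_file(example_kobj, &foo_attribute.attr);
 */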
Once the file has been created, how is it read and written?
Recall the inode initialization in sysfs:
static void sysfs_init_inode(struct sysfs_dirent *sd, struct inode *inode)
{
    ......
    case SYSFS_KOBJ_ATTR:
        inode->i_size = PAGE_SIZE;
        inode->i_fop = &sysfs_file_operations;
    ......
}
sysfs_file_operations is defined as follows:
const struct file_operations sysfs_file_operations = {
    .read    = sysfs_read_file,
    .write   = sysfs_write_file,
    .llseek  = generic_file_llseek,
    .open    = sysfs_open_file,
    .release = sysfs_release,
    .poll    = sysfs_poll,
};
All the file operations are here. Let us start with opening the file.
The code of sysfs_open_file() is as follows:
static int sysfs_open_file(struct inode *inode, struct file *file)
{
    struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
    struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
    struct sysfs_buffer *buffer;
    struct sysfs_ops *ops;
    int error = -EACCES;

    /* need attr_sd for attr and ops, its parent for kobj */
    if (!sysfs_get_active_two(attr_sd))
        return -ENODEV;

    /* every kobject with an attribute needs a ktype assigned */
    /* ops, which will later be stored in buffer->ops, is
     * kobj->ktype->sysfs_ops */
    if (kobj->ktype && kobj->ktype->sysfs_ops)
        ops = kobj->ktype->sysfs_ops;
    else {
        printk(KERN_ERR "missing sysfs attribute operations for "
               "kobject: %s\n", kobject_name(kobj));
        WARN_ON(1);
        goto err_out;
    }

    /* File needs write support.
     * The inode's perms must say it's ok,
     * and we must have a store method.
     */
    if (file->f_mode & FMODE_WRITE) {
        if (!(inode->i_mode & S_IWUGO) || !ops->store)
            goto err_out;
    }

    /* File needs read support.
     * The inode's perms must say it's ok, and we there
     * must be a show method for it.
     */
    if (file->f_mode & FMODE_READ) {
        if (!(inode->i_mode & S_IRUGO) || !ops->show)
            goto err_out;
    }

    /* No error? Great, allocate a buffer for the file, and store it
     * it in file->private_data for easy access.
     */
    error = -ENOMEM;
    buffer = kzalloc(sizeof(struct sysfs_buffer), GFP_KERNEL);
    if (!buffer)
        goto err_out;

    mutex_init(&buffer->mutex);
    buffer->needs_read_fill = 1;
    buffer->ops = ops;
    file->private_data = buffer;

    /* make sure we have open dirent struct */
    /* link the buffer onto the attr_sd->s_attr.open list */
    error = sysfs_get_open_dirent(attr_sd, buffer);
    if (error)
        goto err_free;

    /* open succeeded, put active references */
    sysfs_put_active_two(attr_sd);
    return 0;

err_free:
    kfree(buffer);
err_out:
    sysfs_put_active_two(attr_sd);
    return error;
}
Two things in this code deserve attention:
1: The buffer is linked to file->private_data, and it is also linked onto sysfs_dirent->s_attr.open. In this way both the VFS (through the file) and the device model (through kobject->sd->s_attr.open) can find the buffer being operated on.
2: buffer->ops is set to kobject->ktype->sysfs_ops. The layout of struct sysfs_buffer is sketched below.
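For reference, struct sysfs_buffer is private to sysfs; in a 2.6.25-era fs/sysfs/file.c it looks roughly like this (a sketch; the exact fields may differ between versions):

/* sketched from fs/sysfs/file.c of a 2.6.25-era kernel */
struct sysfs_buffer {
    size_t           count;           /* bytes valid in page */
    loff_t           pos;
    char             *page;           /* one page holding the value */
    struct sysfs_ops *ops;            /* kobj->ktype->sysfs_ops */
    struct mutex     mutex;
    int              needs_read_fill; /* refill page before next read? */
    int              event;
    struct list_head list;            /* on s_attr.open's buffer list */
};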
The entry point for writing the file is as follows:
static ssize_t
sysfs_write_file(struct file *file, const char __user *buf, size_t count,
                 loff_t *ppos)
{
    struct sysfs_buffer *buffer = file->private_data;
    ssize_t len;

    mutex_lock(&buffer->mutex);
    /* copy the contents of buf into buffer->page */
    len = fill_write_buffer(buffer, buf, count);
    /* hand the data over to the device model */
    if (len > 0)
        len = flush_write_buffer(file->f_path.dentry, buffer, len);
    /* update ppos */
    if (len > 0)
        *ppos += len;
    mutex_unlock(&buffer->mutex);
    return len;
}
First, fill_write_buffer() copies the value passed down from user space into buffer->page; then flush_write_buffer() hands it over to the device model. fill_write_buffer() is sketched below; flush_write_buffer() follows after it.
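For completeness, fill_write_buffer() in a 2.6.25-era fs/sysfs/file.c looks roughly like this (quoted from memory, so treat it as a sketch): it allocates the page on first use, caps the copy at one page, and NUL-terminates the data.

static int fill_write_buffer(struct sysfs_buffer *buffer,
                             const char __user *buf, size_t count)
{
    int error;

    if (!buffer->page)
        buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
    if (!buffer->page)
        return -ENOMEM;

    if (count >= PAGE_SIZE)
        count = PAGE_SIZE - 1;
    error = copy_from_user(buffer->page, buf, count);
    buffer->needs_read_fill = 1;
    /* if buf is assumed to contain a string, terminate it by \0,
     * so e.g. sscanf() can scan the string easily */
    buffer->page[count] = 0;
    return error ? -EFAULT : count;
}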
The code of flush_write_buffer() is as follows:
static int
flush_write_buffer(struct dentry *dentry, struct sysfs_buffer *buffer,
                   size_t count)
{
    struct sysfs_dirent *attr_sd = dentry->d_fsdata;
    struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
    struct sysfs_ops *ops = buffer->ops;
    int rc;

    /* need attr_sd for attr and ops, its parent for kobj */
    if (!sysfs_get_active_two(attr_sd))
        return -ENODEV;

    rc = ops->store(kobj, attr_sd->s_attr.attr, buffer->page, count);

    sysfs_put_active_two(attr_sd);

    return rc;
}
As we saw when analyzing the open() path, buffer->ops is kobject->ktype->sysfs_ops, so this amounts to calling kobject->ktype->sysfs_ops->store() with the kobject being operated on, the attribute corresponding to the file, the written value, and its length.
sysfs is designed this way to keep a single uniform interface at the VFS level: every kobject has different attributes and correspondingly different operation methods, and the ktype is where they are told apart. The dispatch pattern is sketched below.
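The kernel's own kobj_sysfs_ops in lib/kobject.c is a good illustration of this dispatch: its show/store use container_of() to recover the full kobj_attribute from the plain struct attribute and then call the per-attribute method. A rough sketch (from memory of the 2.6.25-era source, so details may differ):

/* sketched after lib/kobject.c of a 2.6.25-era kernel */
static ssize_t kobj_attr_show(struct kobject *kobj, struct attribute *attr,
                              char *buf)
{
    struct kobj_attribute *kattr;
    ssize_t ret = -EIO;

    /* recover the containing kobj_attribute from the embedded attr */
    kattr = container_of(attr, struct kobj_attribute, attr);
    if (kattr->show)
        ret = kattr->show(kobj, kattr, buf);
    return ret;
}

static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
                               const char *buf, size_t count)
{
    struct kobj_attribute *kattr;
    ssize_t ret = -EIO;

    kattr = container_of(attr, struct kobj_attribute, attr);
    if (kattr->store)
        ret = kattr->store(kobj, kattr, buf, count);
    return ret;
}

struct sysfs_ops kobj_sysfs_ops = {
    .show  = kobj_attr_show,
    .store = kobj_attr_store,
};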
Reading the file
The corresponding entry point is sysfs_read_file(). The code is as follows:
static ssize_t
sysfs_read_file(struct file *file, char __user *buf, size_t count,
                loff_t *ppos)
{
    struct sysfs_buffer *buffer = file->private_data;
    ssize_t retval = 0;

    mutex_lock(&buffer->mutex);
    /* fetch the value from the device model and store it
     * in buffer->page */
    if (buffer->needs_read_fill) {
        retval = fill_read_buffer(file->f_path.dentry, buffer);
        if (retval)
            goto out;
    }
    /* copy the value in buffer->page out to the user-space buf */
    pr_debug("%s: count = %zd, ppos = %lld, buf = %s\n",
             __FUNCTION__, count, *ppos, buffer->page);
    retval = simple_read_from_buffer(buf, count, ppos, buffer->page,
                                     buffer->count);
out:
    mutex_unlock(&buffer->mutex);
    return retval;
}
The read flow is exactly the reverse of the write flow: it first fetches the value from the device model, then copies it to user space.
The code of fill_read_buffer is as follows:
static int fill_read_buffer(struct dentry *dentry, struct sysfs_buffer *buffer)
{
    struct sysfs_dirent *attr_sd = dentry->d_fsdata;
    struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
    struct sysfs_ops *ops = buffer->ops;
    int ret = 0;
    ssize_t count;

    if (!buffer->page)
        buffer->page = (char *)get_zeroed_page(GFP_KERNEL);
    if (!buffer->page)
        return -ENOMEM;

    /* need attr_sd for attr and ops, its parent for kobj */
    if (!sysfs_get_active_two(attr_sd))
        return -ENODEV;

    buffer->event = atomic_read(&attr_sd->s_attr.open->event);
    count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);

    sysfs_put_active_two(attr_sd);

    /*
     * The code works fine with PAGE_SIZE return but it's likely to
     * indicate truncated result or overflow in normal use cases.
     */
    if (count >= (ssize_t)PAGE_SIZE) {
        print_symbol("fill_read_buffer: %s returned bad count\n",
                     (unsigned long)ops->show);
        /* Try to struggle along */
        count = PAGE_SIZE - 1;
    }
    if (count >= 0) {
        buffer->needs_read_fill = 0;
        buffer->count = count;
    } else {
        ret = count;
    }
    return ret;
}
Here we see that, in the end, kobject->ktype->sysfs_ops->show() is called; the parameters have the same meaning as in the write path.
5: Creating binary attribute files in sysfs
Binary attributes are typically used with firmware, for example to upload firmware images. The interface is sysfs_create_bin_file().
The code is as follows:
int sysfs_create_bin_file(struct kobject *kobj, struct bin_attribute *attr)
{
    BUG_ON(!kobj || !kobj->sd || !attr);

    return sysfs_add_file(kobj->sd, &attr->attr, SYSFS_KOBJ_BIN_ATTR);
}
We have already analyzed sysfs_add_file(). One thing here may be confusing, because sysfs_add_file() contains:
int sysfs_add_file(struct sysfs_dirent *dir_sd, const struct attribute *attr,
                   int type)
{
    ......
    sd->s_attr.attr = (void *)attr;
    ......
}
Why is it sd->s_attr here? Shouldn't it be sd->s_bin_attr?
Look carefully at the struct sysfs_dirent structure:
struct sysfs_dirent {
    atomic_t            s_count;
    atomic_t            s_active;
    struct sysfs_dirent *s_parent;
    struct sysfs_dirent *s_sibling;
    const char          *s_name;

    union {
        struct sysfs_elem_dir      s_dir;
        struct sysfs_elem_symlink  s_symlink;
        struct sysfs_elem_attr     s_attr;
        struct sysfs_elem_bin_attr s_bin_attr;
    };

    unsigned int s_flags;
    ino_t        s_ino;
    umode_t      s_mode;
    struct iattr *s_iattr;
};
Note the union in the middle: its members share the same storage, so s_attr and s_bin_attr overlap. Furthermore, struct attribute is the first member of struct bin_attribute, so the &attr->attr pointer passed in by sysfs_create_bin_file() is also the address of the bin_attribute itself. Writing the pointer through sd->s_attr.attr and reading it back through sd->s_bin_attr.bin_attr therefore yields the same value, and the kernel gets away with one interface fewer. The author clearly put a lot of thought into the design.
Reading and writing a binary file works mostly the same as for a regular attribute file. The difference is that the read and write callbacks are sysfs_dirent->s_bin_attr.bin_attr->read and sysfs_dirent->s_bin_attr.bin_attr->write. A driver-side sketch follows.
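For reference, in a 2.6.25-era linux/sysfs.h struct bin_attribute begins with an embedded struct attribute (which is why the union trick above works), followed by size, a private pointer, and read/write/mmap callbacks. A hedged driver-side sketch (fw_image_write is a hypothetical name; the callback signature is the 2.6.25-era one and may differ in other versions):

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* hypothetical write callback: called with a chunk of the file at
 * offset 'off'; a real driver would feed this into its firmware
 * update machinery */
static ssize_t fw_image_write(struct kobject *kobj,
                              struct bin_attribute *attr,
                              char *buf, loff_t off, size_t count)
{
    return count;
}

static struct bin_attribute fw_image_attr = {
    .attr  = { .name = "image", .mode = S_IWUSR },
    .size  = 0,                /* size not known in advance */
    .write = fw_image_write,
};

/* after the kobject is registered:
 *     error = sysfs_create_bin_file(kobj, &fw_image_attr);
 */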
6: Symbolic link files in the sysfs filesystem
The interface for creating a link file is sysfs_create_link(). The code is as follows:
int sysfs_create_link(struct kobject *kobj, struct kobject *target,
                      const char *name)
{
    struct sysfs_dirent *parent_sd = NULL;
    struct sysfs_dirent *target_sd = NULL;
    struct sysfs_dirent *sd = NULL;
    struct sysfs_addrm_cxt acxt;
    int error;

    BUG_ON(!name);

    if (!kobj)
        parent_sd = &sysfs_root;
    else
        parent_sd = kobj->sd;

    error = -EFAULT;
    if (!parent_sd)
        goto out_put;

    /* target->sd can go away beneath us but is protected with
     * sysfs_assoc_lock. Fetch target_sd from it.
     */
    spin_lock(&sysfs_assoc_lock);
    if (target->sd)
        target_sd = sysfs_get(target->sd);
    spin_unlock(&sysfs_assoc_lock);

    error = -ENOENT;
    if (!target_sd)
        goto out_put;

    error = -ENOMEM;
    sd = sysfs_new_dirent(name, S_IFLNK|S_IRWXUGO, SYSFS_KOBJ_LINK);
    if (!sd)
        goto out_put;

    sd->s_symlink.target_sd = target_sd;
    target_sd = NULL;   /* reference is now owned by the symlink */

    sysfs_addrm_start(&acxt, parent_sd);
    error = sysfs_add_one(&acxt, sd);
    sysfs_addrm_finish(&acxt);

    if (error)
        goto out_put;

    return 0;

out_put:
    sysfs_put(target_sd);
    sysfs_put(sd);
    return error;
}
Most of this is similar to creating a regular file. The only real difference is this line:
sd->s_symlink.target_sd = target_sd;
which saves the sysfs_dirent of the link target in sd->s_symlink.target_sd. A small usage sketch follows.
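As a rough usage sketch (hypothetical names, not from the article's source): given two kobjects that are already registered in sysfs, creating a link under one that points at the other is a single call:

#include <linux/kobject.h>
#include <linux/sysfs.h>

/* hypothetical helper: create a link named "name" under 'from'
 * that points at 'to'; both kobjects are assumed to already be
 * registered in sysfs */
static int example_make_link(struct kobject *from, struct kobject *to,
                             const char *name)
{
    /* internally allocates a SYSFS_KOBJ_LINK sysfs_dirent whose
     * s_symlink.target_sd is to->sd */
    return sysfs_create_link(from, to, name);
}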
The operations for symbolic links are as follows:
const struct inode_operations sysfs_symlink_inode_operations = {
    .readlink    = generic_readlink,
    .follow_link = sysfs_follow_link,
    .put_link    = sysfs_put_link,
};
When a file is looked up through a symbolic link, the VFS calls inode->i_op->readlink(). Its code is as follows:
int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
{
    struct nameidata nd;
    void *cookie;

    nd.depth = 0;
    cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
    if (!IS_ERR(cookie)) {
        int res = vfs_readlink(dentry, buffer, buflen, nd_get_link(&nd));
        if (dentry->d_inode->i_op->put_link)
            dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
        cookie = ERR_PTR(res);
    }
    return PTR_ERR(cookie);
}
It works the same way as in other filesystems: follow_link() obtains the target path and saves it in nd->saved_names[]; vfs_readlink() then copies the target path into buffer; finally, put_link() does the cleanup.
follow_link() looks like this:
static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
    int error = -ENOMEM;
    unsigned long page = get_zeroed_page(GFP_KERNEL);
    if (page)
        error = sysfs_getlink(dentry, (char *)page);
    nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
    return NULL;
}
nd_set_link() stores the page pointer in nd->saved_names[].
sysfs_getlink() boils down to sysfs_get_target_path():
sysfs_getlink() → sysfs_get_target_path()
static int sysfs_get_target_path(struct sysfs_dirent *parent_sd,
                                 struct sysfs_dirent *target_sd, char *path)
{
    struct sysfs_dirent *base, *sd;
    char *s = path;
    int len = 0;

    /* go up to the root, stop at the base */
    base = parent_sd;
    while (base->s_parent) {
        sd = target_sd->s_parent;
        while (sd->s_parent && base != sd)
            sd = sd->s_parent;

        if (base == sd)
            break;

        strcpy(s, "../");
        s += 3;
        base = base->s_parent;
    }

    /* determine end of target string for reverse fillup */
    sd = target_sd;
    while (sd->s_parent && sd != base) {
        len += strlen(sd->s_name) + 1;
        sd = sd->s_parent;
    }

    /* check limits */
    if (len < 2)
        return -EINVAL;
    len--;
    if ((s - path) + len > PATH_MAX)
        return -ENAMETOOLONG;

    /* reverse fillup of target string from target to base */
    sd = target_sd;
    while (sd->s_parent && sd != base) {
        int slen = strlen(sd->s_name);

        len -= slen;
        strncpy(s + len, sd->s_name, slen);
        if (len)
            s[--len] = '/';

        sd = sd->s_parent;
    }

    return 0;
}
The logic here is fairly simple: it first finds the common ancestor of the target path and the directory containing the link, emitting one "../" for every level it must climb from the link's directory; it then walks up from the target node to that same ancestor, filling the target components into the buffer from the back towards the front.
For example, suppose /sys/eric/kernel/test is a link to /sys/sys/device.
The common ancestor of the two paths is the sysfs root, /sys. Climbing from the link's directory /sys/eric/kernel up to the root takes two steps, so the buffer first becomes: ../../
Then, walking up from /sys/sys/device towards the root, the components are filled in from the back of the remaining area forwards:
1: ../../   /device
2: ../../sys/device
This yields the relative path of the target. ^_^
We will not go through sysfs_put_link(); it simply frees the buffer.
7: Summary
In this section we have dug into how the sysfs filesystem is implemented. This is very helpful for understanding the Linux device model.