First, think about the following questions:
1. Given n objects of type B, how would you build a linked list to organize them?
The usual approach is:
struct B
{
	/* data part */
	struct B *next;
	struct B *prev;	/* for doubly linked list */
};
2. Given n B objects and one A object, how would you organize the B objects into a linked list with A as the head?
struct A
{
	/* data part for A */
	struct B *next;
	struct B *prev;	/* for doubly linked list */
};
3. If a project contains many scenarios like the above (An, Bn, ...), how should we organize our code?
With the implementation above, we find that every kind of list needs its own code, and although that code is similar from one list to the next, it is not identical. For example:
struct B1
{
	/* data part */
	struct B1 *next;
	struct B1 *prev;	/* for doubly linked list */
};
struct B2
{
	/* data part */
	struct B2 *next;
	struct B2 *prev;	/* for doubly linked list */
};
The result is easy to predict: a large amount of duplicated, near-identical code, and a heavy burden for maintenance and extension.
How can this be solved? Below is a brief look at one solution used in the Linux kernel. First, consider the following data structure and macro definitions.
struct list_head {
	struct list_head *next, *prev;
};
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}
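For completeness, the kernel also provides compile-time initializers that set up the same empty, self-pointing state (quoted from memory; the exact form may differ slightly between kernel versions):

#define LIST_HEAD_INIT(name) { &(name), &(name) }

#define LIST_HEAD(name) \
	struct list_head name = LIST_HEAD_INIT(name)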
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}
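__list_add is an internal helper; callers normally go through list_add and list_add_tail, which in the kernel are essentially the following thin wrappers (list_add_tail is the one used by spi_message_add_tail later in this post):

/* insert 'new' right after 'head' (stack-like behavior) */
static inline void list_add(struct list_head *new, struct list_head *head)
{
	__list_add(new, head, head->next);
}

/* insert 'new' just before 'head', i.e. at the tail of the circular list */
static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}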
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     prefetch(pos->member.next), &pos->member != (head);	\
	     pos = list_entry(pos->member.next, typeof(*pos), member))
#define list_entry(ptr, type, member) \
	container_of(ptr, type, member)
static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	list_add_tail(&t->transfer_list, &m->transfers);
}	/* insert at the tail of the doubly linked list */
Readers with a decent C background should see what is going on after reading these definitions: together they implement the operations of a generic circular doubly linked list.
If you are not familiar with the container_of macro, see my previous post.
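For readers who do not want to jump to that post, the classic definition is roughly the following: given a pointer to the embedded member, it subtracts the member's offset within the containing type and returns a pointer to the containing structure (the exact form varies slightly across kernel versions):

#define container_of(ptr, type, member) ({			\
	const typeof(((type *)0)->member) *__mptr = (ptr);	\
	(type *)((char *)__mptr - offsetof(type, member)); })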
With these tools, the questions at the beginning of the article can be solved as follows:
struct B
{
	/* data part */
	struct list_head B_list;
};
struct A
{
	/* data part */
	struct list_head A_head;
};
In this way, using the macros above, A and B can easily be organized into one doubly linked list that supports insertion and iterated traversal.
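To make this concrete, here is a minimal, self-contained user-space sketch (not from the kernel, compiles with gcc) that re-declares the helpers above, gives B a hypothetical int data field, and then builds and walks the list; prefetch() is dropped from the iteration macro because it is a kernel-internal hint:

#include <stdio.h>
#include <stddef.h>

/* Minimal user-space re-implementation of the kernel helpers shown above. */
struct list_head {
	struct list_head *next, *prev;
};

static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->next = list;
	list->prev = list;
}

static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

static inline void list_add_tail(struct list_head *new, struct list_head *head)
{
	__list_add(new, head->prev, head);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_entry(ptr, type, member) container_of(ptr, type, member)

/* Same shape as the kernel macro, minus the prefetch() hint. */
#define list_for_each_entry(pos, head, member)				\
	for (pos = list_entry((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = list_entry(pos->member.next, typeof(*pos), member))

struct B {
	int data;			/* hypothetical payload */
	struct list_head B_list;	/* node embedded in each B */
};

struct A {
	struct list_head A_head;	/* list head embedded in A */
};

int main(void)
{
	struct A a;
	struct B b[3];
	struct B *pos;
	int i;

	INIT_LIST_HEAD(&a.A_head);
	for (i = 0; i < 3; i++) {
		b[i].data = i;
		list_add_tail(&b[i].B_list, &a.A_head);
	}

	list_for_each_entry(pos, &a.A_head, B_list)
		printf("%d\n", pos->data);	/* prints 0, 1, 2 */

	return 0;
}

Note that iteration is expressed entirely in terms of struct B: list_entry/container_of recover the containing object from the embedded list_head node, which is exactly what makes the list generic.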
Here is a real example from Linux (the SPI transfer code). The two structures below separate the message-processing entity from the transfer entity, which reduces coupling and is good design practice; transfers in spi_message plays the role of A_head above, and transfer_list in spi_transfer plays the role of B_list.
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 * spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void *tx_buf;
	void *rx_buf;
	unsigned len;
	dma_addr_t tx_dma;
	dma_addr_t rx_dma;
	unsigned cs_change:1;
	u8 bits_per_word;
	u16 delay_usecs;
	u32 speed_hz;
	struct list_head transfer_list;
};
struct spi_message {
	struct list_head transfers;
	struct spi_device *spi;
	unsigned is_dma_mapped:1;
	/* REVISIT: we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes". Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm. But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */
	/* completion is reported through a callback */
	void (*complete)(void *context);
	void *context;
	unsigned actual_length;
	int status;
	/* for optional use by whatever driver currently owns the
	 * spi_message ... between calls to spi_async and then later
	 * complete(), that's the spi_master controller driver.
	 */
	struct list_head queue;
	void *state;
};
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX)
			|| (spi->mode & SPI_3WIRE)) {
		struct spi_transfer *xfer;
		unsigned flags = master->flags;

		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->rx_buf && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
				return -EINVAL;
			if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
				return -EINVAL;
		}
	}

	message->spi = spi;
	message->status = -EINPROGRESS;
	return master->transfer(spi, message);
}
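Finally, a usage sketch (not part of the quoted kernel code) of how a protocol driver strings these pieces together; the function name and buffer parameter are made up for illustration, spi_message_init zeroes the message and runs INIT_LIST_HEAD on its transfers list, and spi_sync is the synchronous wrapper around spi_async:

static int demo_spi_write(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf	= buf,
		.len	= len,
	};
	struct spi_message msg;

	spi_message_init(&msg);			/* INIT_LIST_HEAD(&msg.transfers) */
	spi_message_add_tail(&xfer, &msg);	/* link the transfer into the message */
	return spi_sync(spi, &msg);		/* blocks until complete() fires */
}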
The end.