/**
* ubifs_add_bud - add bud LEB to the tree of buds and its journal head list.
* @c: UBIFS file-system description object
* @bud: the bud to add
*/
void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
{
struct rb_node **p, *parent = NULL;
struct ubifs_bud *b;
struct ubifs_jhead *jhead;
spin_lock(&c->buds_lock);
p = &c->buds.rb_node;
while (*p) {
parent = *p;
b = rb_entry(parent, struct ubifs_bud, rb);
ubifs_assert(bud->lnum != b->lnum);
if (bud->lnum < b->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
}
rb_link_node(&bud->rb, parent, p);
rb_insert_color(&bud->rb, &c->buds);
if (c->jheads) {
jhead = &c->jheads[bud->jhead];
list_add_tail(&bud->list, &jhead->buds_list);
} else
ubifs_assert(c->replaying && c->ro_mount);
/*
 * Note, although this is a new bud, we anyway account this space now,
 * before any data has been written to it, because this is needed to
 * guarantee a bounded mount time: this bud will anyway have to be read
 * and scanned.
*/
c->bud_bytes += c->leb_size - bud->start;
dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
spin_unlock(&c->buds_lock);
}
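A minimal caller sketch (illustrative only, not part of the original file): how a bud is typically allocated and handed to ubifs_add_bud(). It mirrors what ubifs_add_bud_to_log() further below does, with the log reference node and most error handling omitted.

static int example_add_bud(struct ubifs_info *c, int jhead, int lnum, int offs)
{
	struct ubifs_bud *bud;

	bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
	if (!bud)
		return -ENOMEM;

	bud->lnum = lnum;	/* LEB number of the bud */
	bud->start = offs;	/* offset at which journal data starts */
	bud->jhead = jhead;	/* journal head the bud belongs to */

	ubifs_add_bud(c, bud);	/* takes c->buds_lock internally */
	return 0;
}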
/**
* ubifs_log_start_commit - start commit.
* @c: UBIFS file-system description object
* @ltail_lnum: return new log tail LEB number
*
 * The commit operation starts with writing a "commit start" node to the log,
 * together with reference nodes for all journal heads, which will define the
 * new journal after the commit has finished. The commit start and reference
 * nodes are written in one go to the nearest empty log LEB (hence, when the
 * commit is finished, UBIFS may safely unmap all the previous log LEBs). This
 * function returns zero in case of success and a negative error code in case
 * of failure.
*/
int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
{
void *buf;
struct ubifs_cs_node *cs;
struct ubifs_ref_node *ref;
int err, i, max_len, len;
err = dbg_check_bud_bytes(c);
if (err)
return err;
max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;
max_len = ALIGN(max_len, c->min_io_size);
buf = cs = kmalloc(max_len, GFP_NOFS);
if (!buf)
return -ENOMEM;
cs->ch.node_type = UBIFS_CS_NODE;
cs->cmt_no = cpu_to_le64(c->cmt_no);
ubifs_prepare_node(c, cs, UBIFS_CS_NODE_SZ, 0);
/*
* Note, we do not lock 'c->log_mutex' because this is the commit start
* phase and we are exclusively using the log. And we do not lock
* write-buffer because nobody can write to the file-system at this
* phase.
*/
len = UBIFS_CS_NODE_SZ;
for (i = 0; i < c->jhead_cnt; i++) {
int lnum = c->jheads[i].wbuf.lnum;
int offs = c->jheads[i].wbuf.offs;
if (lnum == -1 || offs == c->leb_size)
continue;
dbg_log("add ref to LEB %d:%d for jhead %s",
lnum, offs, dbg_jhead(i));
ref = buf + len;
ref->ch.node_type = UBIFS_REF_NODE;
ref->lnum = cpu_to_le32(lnum);
ref->offs = cpu_to_le32(offs);
ref->jhead = cpu_to_le32(i);
ubifs_prepare_node(c, ref, UBIFS_REF_NODE_SZ, 0);
len += UBIFS_REF_NODE_SZ;
}
ubifs_pad(c, buf + len, ALIGN(len, c->min_io_size) - len);
#ifdef CONFIG_UBIFS_FS_FULL_USE_LOG
/*
 * CONFIG_UBIFS_FS_FULL_USE_LOG: do not switch to the next log LEB; keep
 * programming the next available pages of the current log LEB. Only when
 * the write position gets within four min. I/O units of the end of the
 * LEB do we switch to the next log LEB.
 */
if (c->lhead_offs >= (c->leb_size - (c->min_io_size * 4))) {
int old_lnum = c->lhead_lnum;
int old_offs = c->lhead_offs;
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0;
ubifs_msg("switch log LEB %d:%d to %d:%d\n", old_lnum, old_offs, c->lhead_lnum, c->lhead_offs);
}
#else
/* Switch to the next log LEB */
if (c->lhead_offs) {
int old_lnum = c->lhead_lnum;
int old_offs = c->lhead_offs;
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0;
ubifs_msg("switch log LEB %d:%d to %d:%d\n", old_lnum, old_offs, c->lhead_lnum, c->lhead_offs);
}
#endif
if (c->lhead_offs == 0) {
/* Must ensure next LEB has been unmapped */
err = ubifs_leb_unmap(c, c->lhead_lnum);
if (err)
goto out;
}
len = ALIGN(len, c->min_io_size);
dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
err = ubifs_leb_write(c, c->lhead_lnum, cs, c->lhead_offs, len); //MTK, modify offset 0 -> c->lhead_offs
if (err)
goto out;
*ltail_lnum = c->lhead_lnum;
//......... (remainder of this function omitted) .........
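For orientation, the buffer prepared above is a single commit-start node followed by one reference node per busy journal head, padded up to the next min_io_size boundary. A small helper (illustrative only, not part of the original code) repeating the same sizing arithmetic:

/*
 * Illustrative only: the upper bound on the commit-start buffer, as
 * computed at the top of ubifs_log_start_commit(). For example, if the
 * commit-start node plus one reference node per journal head together
 * fit into one 2048-byte min. I/O unit, the result is simply 2048.
 */
static int example_commit_buf_max_len(const struct ubifs_info *c)
{
	int max_len = UBIFS_CS_NODE_SZ + c->jhead_cnt * UBIFS_REF_NODE_SZ;

	return ALIGN(max_len, c->min_io_size);
}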
CSOCKET_CNODE *cconnp_reserve(CCONNP *cconnp)
{
CSOCKET_CNODE *csocket_cnode;
int sockfd;
if(EC_TRUE == cqueue_is_empty(CCONNP_IDLE_CONN_QUEUE(cconnp)))
{
CQUEUE_DATA *cqueue_data;
/*if no idle, new one*/
if(csocket_connect(CCONNP_SRV_IPADDR(cconnp), CCONNP_SRV_PORT(cconnp), CSOCKET_IS_NONBLOCK_MODE, &sockfd))
{
dbg_log(SEC_0154_CCONNP, 0)(LOGSTDOUT, "error:cconnp_reserve: connect server %s:%ld failed\n",
CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
return (NULL_PTR);
}
csocket_cnode = csocket_cnode_new(CCONNP_SRV_TCID(cconnp), sockfd, CSOCKET_TYPE_TCP, CCONNP_SRV_IPADDR(cconnp), CCONNP_SRV_PORT(cconnp));
if(NULL_PTR == csocket_cnode)
{
dbg_log(SEC_0154_CCONNP, 0)(LOGSTDOUT, "error:cconnp_reserve: new csocket_cnode for socket %d to server %s:%ld failed\n",
sockfd, CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
csocket_close(sockfd);
return (NULL_PTR);
}
cqueue_data = cqueue_push(CCONNP_IDLE_CONN_QUEUE(cconnp), (void *)csocket_cnode);
if(NULL_PTR == cqueue_data)
{
dbg_log(SEC_0154_CCONNP, 0)(LOGSTDOUT, "error:cconnp_reserve: push socket %d to server %s:%ld failed\n",
sockfd, CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
csocket_cnode_free(csocket_cnode);
return (NULL_PTR);
}
CSOCKET_CNODE_WORK_NODE(csocket_cnode) = (void *)cqueue_data;
CSOCKET_CNODE_WORK_OWNER(csocket_cnode) = (void *)cconnp;
CSOCKET_CNODE_WORK_RELEASE(csocket_cnode) = (CSOCKET_CNODE_WORK_REL)cconnp_erase;
CSOCKET_CNODE_WORK_STATUS(csocket_cnode) = CSOCKET_CNODE_WORK_STATUS_IDLE;
dbg_log(SEC_0154_CCONNP, 9)(LOGSTDOUT, "[DEBUG] cconnp_reserve: create and push sockfd %d to server %s:%ld done\n",
CSOCKET_CNODE_SOCKFD(csocket_cnode),
CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
}
/*reserve one idle*/
csocket_cnode = cqueue_pop(CCONNP_IDLE_CONN_QUEUE(cconnp));
if(NULL_PTR == csocket_cnode)
{
dbg_log(SEC_0154_CCONNP, 0)(LOGSTDOUT, "error:cconnp_reserve: server %s:%ld has no idle conn\n",
CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
return (NULL_PTR);
}
if(EC_TRUE == CSOCKET_CNODE_WORK_PUSHED(csocket_cnode))
{
/*when csocket_cnode was released and pushed to connp, RD event was set. here need to clear it*/
cepoll_del_event(task_brd_default_get_cepoll(), CSOCKET_CNODE_SOCKFD(csocket_cnode), CEPOLL_RD_EVENT);
CSOCKET_CNODE_WORK_PUSHED(csocket_cnode) = EC_FALSE;
}
CSOCKET_CNODE_WORK_NODE(csocket_cnode) = NULL_PTR;
CSOCKET_CNODE_WORK_OWNER(csocket_cnode) = (void *)cconnp;
CSOCKET_CNODE_WORK_RELEASE(csocket_cnode) = (CSOCKET_CNODE_WORK_REL)cconnp_release;
CSOCKET_CNODE_WORK_STATUS(csocket_cnode) = CSOCKET_CNODE_WORK_STATUS_NONE;
dbg_log(SEC_0154_CCONNP, 9)(LOGSTDOUT, "[DEBUG] cconnp_reserve: pop sockfd %d from server %s:%ld done\n",
CSOCKET_CNODE_SOCKFD(csocket_cnode),
CCONNP_SRV_IPADDR_STR(cconnp), CCONNP_SRV_PORT(cconnp));
return (csocket_cnode);
}
Developer: petercloud, Project: RFS, Lines of code: 71, Source file: cconnp.c
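A hypothetical usage sketch (not from cconnp.c): reserve a connection from the pool, use its socket, then hand it back. The cconnp_release() call shown is an assumption based on the WORK_RELEASE hook installed in cconnp_reserve().

static void example_use_connection_pool(CCONNP *cconnp)
{
    CSOCKET_CNODE *csocket_cnode;

    csocket_cnode = cconnp_reserve(cconnp);
    if (NULL_PTR == csocket_cnode)
    {
        return;
    }

    /* ... send/recv on CSOCKET_CNODE_SOCKFD(csocket_cnode) ... */

    /* assumed counterpart of the WORK_RELEASE hook set above */
    cconnp_release(cconnp, csocket_cnode);
}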
Example 7: ubifs_add_bud_to_log
/**
* ubifs_add_bud_to_log - add a new bud to the log.
* @c: UBIFS file-system description object
* @jhead: journal head the bud belongs to
* @lnum: LEB number of the bud
* @offs: starting offset of the bud
*
 * This function writes a reference node for the new bud LEB @lnum to the log,
 * and adds the bud to the buds tree. It also makes sure that the log size does
 * not exceed the 'c->max_bud_bytes' limit. Returns zero in case of success,
 * %-EAGAIN if a commit is required, and a negative error code in case of
 * failure.
*/
int ubifs_add_bud_to_log(struct ubifs_info *c, int jhead, int lnum, int offs)
{
int err;
struct ubifs_bud *bud;
struct ubifs_ref_node *ref;
bud = kmalloc(sizeof(struct ubifs_bud), GFP_NOFS);
if (!bud)
return -ENOMEM;
ref = kzalloc(c->ref_node_alsz, GFP_NOFS);
if (!ref) {
kfree(bud);
return -ENOMEM;
}
mutex_lock(&c->log_mutex);
ubifs_assert(!c->ro_media && !c->ro_mount);
if (c->ro_error) {
err = -EROFS;
goto out_unlock;
}
/* Make sure we have enough space in the log */
if (empty_log_bytes(c) - c->ref_node_alsz < c->min_log_bytes) {
dbg_log("not enough log space - %lld, required %d",
empty_log_bytes(c), c->min_log_bytes);
ubifs_commit_required(c);
err = -EAGAIN;
goto out_unlock;
}
/*
* Make sure the amount of space in buds will not exceed the
* 'c->max_bud_bytes' limit, because we want to guarantee mount time
* limits.
*
 * It is not necessary to hold @c->buds_lock when reading @c->bud_bytes
 * because we are holding @c->log_mutex. All changes to @c->bud_bytes take
 * place while both @c->log_mutex and @c->buds_lock are held.
*/
if (c->bud_bytes + c->leb_size - offs > c->max_bud_bytes) {
dbg_log("bud bytes %lld (%lld max), require commit",
c->bud_bytes, c->max_bud_bytes);
ubifs_commit_required(c);
err = -EAGAIN;
goto out_unlock;
}
/*
* If the journal is full enough - start background commit. Note, it is
* OK to read 'c->cmt_state' without spinlock because integer reads
* are atomic in the kernel.
*/
if (c->bud_bytes >= c->bg_bud_bytes &&
c->cmt_state == COMMIT_RESTING) {
dbg_log("bud bytes %lld (%lld max), initiate BG commit",
c->bud_bytes, c->max_bud_bytes);
ubifs_request_bg_commit(c);
}
bud->lnum = lnum;
bud->start = offs;
bud->jhead = jhead;
ref->ch.node_type = UBIFS_REF_NODE;
ref->lnum = cpu_to_le32(bud->lnum);
ref->offs = cpu_to_le32(bud->start);
ref->jhead = cpu_to_le32(jhead);
if (c->lhead_offs > c->leb_size - c->ref_node_alsz) {
c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum);
c->lhead_offs = 0;
}
if (c->lhead_offs == 0) {
/* Must ensure next log LEB has been unmapped */
err = ubifs_leb_unmap(c, c->lhead_lnum);
if (err)
goto out_unlock;
}
if (bud->start == 0) {
/*
 * Before writing the LEB reference which refers to an empty LEB
 * to the log, we have to make sure it is mapped, because
 * otherwise we would risk referring to an LEB containing garbage
 * after an unclean reboot, because the target LEB might have been
//......... (remainder of this function omitted) .........
/* Walk through the table and remove all entries whose lifetime has ended.
   We have a problem here. To actually remove the entries we must get
   the write-lock. But since we want to keep the time we hold the
   lock as short as possible, we cannot simply acquire the lock when we
   start looking for timed-out entries.
   Therefore we do it in two stages: first we look for entries which
   must be invalidated and remember them. Then we get the lock and
   actually remove them. This is complicated by the way we have to
   free the data structures, since some hash table entries share the same
   data. */
time_t
prune_cache (struct database_dyn *table, time_t now, int fd)
{
size_t cnt = table->head->module;
/* If this table is not actually used don't do anything. */
if (cnt == 0)
{
if (fd != -1)
{
/* Reply to the INVALIDATE initiator. */
int32_t resp = 0;
writeall (fd, &resp, sizeof (resp));
}
/* No need to do this again anytime soon. */
return 24 * 60 * 60;
}
/* If we check for the modification of the underlying file we invalidate
the entries also in this case. */
if (table->inotify_descr < 0 && table->check_file && now != LONG_MAX)
{
struct stat64 st;
if (stat64 (table->filename, &st) < 0)
{
char buf[128];
/* We cannot stat() the file, disable file checking if the
file does not exist. */
dbg_log (_("cannot stat() file `%s': %s"),
table->filename, strerror_r (errno, buf, sizeof (buf)));
if (errno == ENOENT)
table->check_file = 0;
}
else
{
if (st.st_mtime != table->file_mtime)
{
/* The file changed. Invalidate all entries. */
now = LONG_MAX;
table->file_mtime = st.st_mtime;
}
}
}
/* We run through the table and find values which are not valid anymore.
Note that for the initial step, finding the entries to be removed,
we don't need to get any lock. It is at all times assured that the
linked lists are set up correctly and that no second thread prunes
the cache. */
bool *mark;
size_t memory_needed = cnt * sizeof (bool);
bool mark_use_alloca;
if (__builtin_expect (memory_needed <= MAX_STACK_USE, 1))
{
mark = alloca (cnt * sizeof (bool));
memset (mark, '\0', memory_needed);
mark_use_alloca = true;
}
else
{
mark = xcalloc (1, memory_needed);
mark_use_alloca = false;
}
size_t first = cnt + 1;
size_t last = 0;
char *const data = table->data;
bool any = false;
if (__builtin_expect (debug_level > 2, 0))
dbg_log (_("pruning %s cache; time %ld"),
dbnames[table - dbs], (long int) now);
#define NO_TIMEOUT LONG_MAX
time_t next_timeout = NO_TIMEOUT;
do
{
ref_t run = table->head->array[--cnt];
while (run != ENDREF)
{
struct hashentry *runp = (struct hashentry *) (data + run);
struct datahead *dh = (struct datahead *) (data + runp->packet);
/* Some debug support. */
if (__builtin_expect (debug_level > 2, 0))
//......... (remainder of this function omitted) .........
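The comment at the top of prune_cache() describes a two-stage approach: scan and mark expired entries without holding the write lock, then take the lock only long enough to remove them. A generic, self-contained sketch of that pattern (hypothetical names and data layout, not nscd code):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>
#include <time.h>

struct example_entry { time_t timeout; bool in_use; };

/* Illustrative only: mark without the lock, then lock and remove.  */
static void example_prune (struct example_entry *tab, size_t n,
                           pthread_rwlock_t *lock, time_t now)
{
  bool *mark = calloc (n, sizeof (*mark));
  if (mark == NULL)
    return;

  /* Stage 1: find expired entries; no write lock is needed to read.  */
  for (size_t i = 0; i < n; ++i)
    if (tab[i].in_use && tab[i].timeout < now)
      mark[i] = true;

  /* Stage 2: hold the write lock only while actually removing.  */
  pthread_rwlock_wrlock (lock);
  for (size_t i = 0; i < n; ++i)
    if (mark[i])
      tab[i].in_use = false;
  pthread_rwlock_unlock (lock);

  free (mark);
}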