continue;
bn = be64_to_cpu(*p);
if (gfs2_holder_initialized(rd_gh)) {
- rgd = (struct gfs2_rgrpd *)rd_gh->gh_gl->gl_object;
+ rgd = gfs2_glock2rgrp(rd_gh->gh_gl);
gfs2_assert_withdraw(sdp,
gfs2_glock_is_locked_by_me(rd_gh->gh_gl));
} else {
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
- struct gfs2_rgrpd *rgd;
- rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+ struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
rg_blocks += rgd->rd_length;
}
*
* Called when demoting or unlocking an EX glock. We must flush
* to disk all dirty buffers/pages relating to this glock, and must not
- * not return to caller to demote/unlock the glock until I/O is complete.
+ * return to caller to demote/unlock the glock until I/O is complete.
*/
static void rgrp_go_sync(struct gfs2_glock *gl)
{
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
struct address_space *mapping = &sdp->sd_aspace;
- struct gfs2_rgrpd *rgd = gl->gl_object;
+ struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
if (rgd)
gfs2_rgrp_brelse(rgd);
return ip;
}
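+/*
+ * gfs2_glock2rgrp - fetch the resource group attached to a glock
+ *
+ * gl_object is read under the gl_lockref spinlock, matching the code
+ * that sets and clears it under the same lock.
+ */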
+struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl)
+{
+ struct gfs2_rgrpd *rgd;
+
+ spin_lock(&gl->gl_lockref.lock);
+ rgd = gl->gl_object;
+ spin_unlock(&gl->gl_lockref.lock);
+
+ return rgd;
+}
+
static void gfs2_clear_glop_pending(struct gfs2_inode *ip)
{
if (!ip)
*/
static void iopen_go_callback(struct gfs2_glock *gl, bool remote)
{
- struct gfs2_inode *ip = (struct gfs2_inode *)gl->gl_object;
+ struct gfs2_inode *ip = gl->gl_object;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
if (!remote || (sdp->sd_vfs->s_flags & MS_RDONLY))
preempt_enable();
}
+extern struct gfs2_rgrpd *gfs2_glock2rgrp(struct gfs2_glock *gl);
+
#endif /* __INCORE_DOT_H__ */
fail_refresh:
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
fail_put:
if (io_gl)
gfs2_glock_put(io_gl);
if (gfs2_holder_initialized(&i_gh))
gfs2_glock_dq_uninit(&i_gh);
- ip->i_gl->gl_object = NULL;
+ glock_set_object(ip->i_gl, NULL);
fail:
iget_failed(inode);
return ERR_PTR(error);
if (error)
goto fail_free_inode;
- ip->i_gl->gl_object = ip;
+ glock_set_object(ip->i_gl, ip);
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1);
if (error)
goto fail_free_inode;
if (error)
goto fail_gunlock2;
- ip->i_iopen_gh.gh_gl->gl_object = ip;
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
gfs2_glock_put(io_gl);
gfs2_set_iop(inode);
insert_inode_hash(inode);
{
struct gfs2_glock *gl = bd->bd_gl;
struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
- struct gfs2_rgrpd *rgd = gl->gl_object;
+ struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
struct gfs2_bitmap *bi = rgd->rd_bits + index;
rb_erase(n, &sdp->sd_rindex_tree);
if (gl) {
- spin_lock(&gl->gl_lockref.lock);
- gl->gl_object = NULL;
- spin_unlock(&gl->gl_lockref.lock);
+ glock_set_object(gl, NULL);
gfs2_glock_add_to_lru(gl);
gfs2_glock_put(gl);
}
error = rgd_insert(rgd);
spin_unlock(&sdp->sd_rindex_spin);
if (!error) {
- rgd->rd_gl->gl_object = rgd;
+ glock_set_object(rgd->rd_gl, rgd);
rgd->rd_gl->gl_vm.start = (rgd->rd_addr * bsize) & PAGE_MASK;
rgd->rd_gl->gl_vm.end = PAGE_ALIGN((rgd->rd_addr +
rgd->rd_length) * bsize) - 1;
gfs2_holder_uninit(gh);
error = err;
} else {
- if (!error)
- error = statfs_slow_fill(
- gh->gh_gl->gl_object, sc);
+ if (!error) {
+ struct gfs2_rgrpd *rgd =
+ gfs2_glock2rgrp(gh->gh_gl);
+
+ error = statfs_slow_fill(rgd, sc);
+ }
gfs2_glock_dq_uninit(gh);
}
}
gfs2_glock_put(ip->i_gl);
ip->i_gl = NULL;
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
- ip->i_iopen_gh.gh_gl->gl_object = NULL;
+ glock_set_object(ip->i_iopen_gh.gh_gl, NULL);
ip->i_iopen_gh.gh_flags |= GL_NOCACHE;
gfs2_glock_dq_uninit(&ip->i_iopen_gh);
}
gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);
for (x = 0; x < rlist.rl_rgrps; x++) {
- struct gfs2_rgrpd *rgd;
- rgd = rlist.rl_ghs[x].gh_gl->gl_object;
+ struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(rlist.rl_ghs[x].gh_gl);
+
rg_blocks += rgd->rd_length;
}