From: Ilya Dryomov <idryomov@gmail.com>
Date: Wed, 30 May 2018 16:29:14 +0200
Subject: libceph: make abort_on_full a per-osdc setting
Git-commit: c843d13caefad9f2f182f38d6bfe492c9f00e086
Patch-mainline: v4.18-rc1
References: FATE#324714

The intent behind making it a per-request setting was that it would be
set for writes, but not for reads. As it is, the flag is set for all
fs/ceph requests except for the pool perm check stat request
(technically a read).

ceph_osdc_abort_on_full() has skipped reads since the previous commit,
and I don't see a use case for marking individual requests.
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
Acked-by: Jeff Layton <jlayton@redhat.com>
Reviewed-by: "Yan, Zheng" <zyan@redhat.com>
Acked-by: Luis Henriques <lhenriques@suse.com>
---
 fs/ceph/addr.c                  | 1 -
 fs/ceph/file.c                  | 1 -
 fs/ceph/super.c                 | 2 ++
 include/linux/ceph/osd_client.h | 2 +-
 net/ceph/osd_client.c           | 9 ++++-----
 5 files changed, 7 insertions(+), 8 deletions(-)

--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1950,7 +1950,6 @@ static int __ceph_pool_perm_get(struct c
err = ceph_osdc_start_request(&fsc->client->osdc, rd_req, false);

wr_req->r_mtime = ci->vfs_inode.i_mtime;
- wr_req->r_abort_on_full = true;
err2 = ceph_osdc_start_request(&fsc->client->osdc, wr_req, false);

if (!err)
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -854,7 +854,6 @@ static void ceph_aio_retry_work(struct w
req->r_callback = ceph_aio_complete_req;
req->r_inode = inode;
req->r_priv = aio_req;
- req->r_abort_on_full = true;

ret = ceph_osdc_start_request(req->r_osdc, req, false);
out:
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -616,7 +616,9 @@ static struct ceph_fs_client *create_fs_
err = PTR_ERR(fsc->client);
goto fail;
}
+
fsc->client->extra_mon_dispatch = extra_mon_dispatch;
+ fsc->client->osdc.abort_on_full = true;

if (!fsopt->mds_namespace) {
ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -196,7 +196,6 @@ struct ceph_osd_request {
struct timespec r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
- bool r_abort_on_full; /* return ENOSPC when full */

/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
@@ -342,6 +341,7 @@ struct ceph_osd_client {
struct rb_root linger_map_checks;
atomic_t num_requests;
atomic_t num_homeless;
+ bool abort_on_full; /* abort w/ ENOSPC when full */
int abort_err;
struct delayed_work timeout_work;
struct delayed_work osds_timeout_work;
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -964,7 +964,6 @@ struct ceph_osd_request *ceph_osdc_new_r
truncate_size, truncate_seq);
}

- req->r_abort_on_full = true;
req->r_flags = flags;
req->r_base_oloc.pool = layout->pool_id;
req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
@@ -2173,7 +2172,7 @@ again:
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
dout("req %p full/pool_full\n", req);
- if (req->r_abort_on_full) {
+ if (osdc->abort_on_full) {
err = -ENOSPC;
} else {
pr_warn_ratelimited("FULL or reached pool quota\n");
@@ -2380,8 +2379,7 @@ static int abort_on_full_fn(struct ceph_
struct ceph_osd_client *osdc = req->r_osdc;
bool *victims = arg;

- if (req->r_abort_on_full &&
- (req->r_flags & CEPH_OSD_FLAG_WRITE) &&
+ if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
(ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
pool_full(osdc, req->r_t.base_oloc.pool))) {
if (!*victims) {
@@ -2404,7 +2402,8 @@ static void ceph_osdc_abort_on_full(stru
{
bool victims = false;

- if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc))
+ if (osdc->abort_on_full &&
+ (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
for_each_request(osdc, abort_on_full_fn, &victims);
}