Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git (synced 2026-01-11 17:10:13 +00:00)

A bunch of libceph fixes split evenly between memory safety and
implementation correctness issues (all marked for stable) and a change
in maintainers for CephFS: Slava and Alex have formally taken over
Xiubo's role.
 -----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEydHwtzie9C7TfviiSn/eOAIR84sFAmlhPnYTHGlkcnlvbW92
 QGdtYWlsLmNvbQAKCRBKf944AhHzi+xRB/4wW8+zp9w+AzK023uL93A9iU4yPM+/
 XYRSIhnR0VWdGTwQsSiQmuwT9RVW5ElH1o0Zzt9BBM9XW8BTIyDnCcdd4yYT+fRc
 ZCG5JUXF0rRgSXYWTpHfEUg5H0wAFCruhhv51vfAuxe5+AFh+7J2/Ct2SraIMt7r
 brShR7vqxUgaBp4TINdsEZBSNBhEIUkPbulZxDDq4+uFN5Cl1ZgESm4QOdxqZFJ8
 lkvPRRjh7rpSIUfULSyH6UVvS/FsSjXrL1rhszlGbgwc297ox+UTk+dOfko8gdmL
 /l92y6Si5CdCxEwLGHvW4lW7qWj9ba9g8v9DCy1BL6dE+CbY1HSjgtJB
 =bROV
 -----END PGP SIGNATURE-----

Merge tag 'ceph-for-6.19-rc5' of https://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "A bunch of libceph fixes split evenly between memory safety and
  implementation correctness issues (all marked for stable) and a change
  in maintainers for CephFS: Slava and Alex have formally taken over
  Xiubo's role"

* tag 'ceph-for-6.19-rc5' of https://github.com/ceph/ceph-client:
  libceph: make calc_target() set t->paused, not just clear it
  libceph: reset sparse-read state in osd_fault()
  libceph: return the handler error from mon_handle_auth_done()
  libceph: make free_choose_arg_map() resilient to partial allocation
  ceph: update co-maintainers list in MAINTAINERS
  libceph: replace overzealous BUG_ON in osdmap_apply_incremental()
  libceph: prevent potential out-of-bounds reads in handle_auth_done()
This commit is contained in:
Linus Torvalds 2026-01-09 15:05:19 -10:00
commit 4621c338d3
5 changed files with 34 additions and 14 deletions

View File

@@ -5802,7 +5802,8 @@ F:	drivers/power/supply/cw2015_battery.c
 
 CEPH COMMON CODE (LIBCEPH)
 M:	Ilya Dryomov <idryomov@gmail.com>
-M:	Xiubo Li <xiubli@redhat.com>
+M:	Alex Markuze <amarkuze@redhat.com>
+M:	Viacheslav Dubeyko <slava@dubeyko.com>
 L:	ceph-devel@vger.kernel.org
 S:	Supported
 W:	http://ceph.com/
@@ -5813,8 +5814,9 @@ F:	include/linux/crush/
 F:	net/ceph/
 
 CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
-M:	Xiubo Li <xiubli@redhat.com>
 M:	Ilya Dryomov <idryomov@gmail.com>
+M:	Alex Markuze <amarkuze@redhat.com>
+M:	Viacheslav Dubeyko <slava@dubeyko.com>
 L:	ceph-devel@vger.kernel.org
 S:	Supported
 W:	http://ceph.com/

View File

@@ -2376,7 +2376,9 @@ static int process_auth_done(struct ceph_connection *con, void *p, void *end)
 	ceph_decode_64_safe(&p, end, global_id, bad);
 	ceph_decode_32_safe(&p, end, con->v2.con_mode, bad);
 	ceph_decode_32_safe(&p, end, payload_len, bad);
+	ceph_decode_need(&p, end, payload_len, bad);
+
 	dout("%s con %p global_id %llu con_mode %d payload_len %d\n",
 	     __func__, con, global_id, con->v2.con_mode, payload_len);

View File

@@ -1417,7 +1417,7 @@ static int mon_handle_auth_done(struct ceph_connection *con,
 	if (!ret)
 		finish_hunting(monc);
 	mutex_unlock(&monc->mutex);
-	return 0;
+	return ret;
 }
 
 static int mon_handle_auth_bad_method(struct ceph_connection *con,

View File

@@ -1586,6 +1586,7 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 	struct ceph_pg_pool_info *pi;
 	struct ceph_pg pgid, last_pgid;
 	struct ceph_osds up, acting;
+	bool should_be_paused;
 	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
 	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
 	bool force_resend = false;
@@ -1654,10 +1655,16 @@ static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
 			   &last_pgid))
 		force_resend = true;
 
-	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
-		t->paused = false;
+	should_be_paused = target_should_be_paused(osdc, t, pi);
+	if (t->paused && !should_be_paused) {
 		unpaused = true;
 	}
+
+	if (t->paused != should_be_paused) {
+		dout("%s t %p paused %d -> %d\n", __func__, t, t->paused,
+		     should_be_paused);
+		t->paused = should_be_paused;
+	}
 
 	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
 		ceph_osds_changed(&t->acting, &acting,
				  t->used_replica || any_change);
@@ -4281,6 +4288,9 @@ static void osd_fault(struct ceph_connection *con)
 		goto out_unlock;
 	}
 
+	osd->o_sparse_op_idx = -1;
+	ceph_init_sparse_read(&osd->o_sparse_read);
+
 	if (!reopen_osd(osd))
 		kick_osd_requests(osd);
 	maybe_request_map(osdc);

View File

@@ -241,22 +241,26 @@ static struct crush_choose_arg_map *alloc_choose_arg_map(void)
 
 static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
 {
-	if (arg_map) {
-		int i, j;
+	int i, j;
 
-		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
+	if (!arg_map)
+		return;
 
+	WARN_ON(!RB_EMPTY_NODE(&arg_map->node));
+
+	if (arg_map->args) {
 		for (i = 0; i < arg_map->size; i++) {
 			struct crush_choose_arg *arg = &arg_map->args[i];
 
-			for (j = 0; j < arg->weight_set_size; j++)
-				kfree(arg->weight_set[j].weights);
-			kfree(arg->weight_set);
+			if (arg->weight_set) {
+				for (j = 0; j < arg->weight_set_size; j++)
+					kfree(arg->weight_set[j].weights);
+				kfree(arg->weight_set);
+			}
 			kfree(arg->ids);
 		}
 		kfree(arg_map->args);
-		kfree(arg_map);
 	}
+	kfree(arg_map);
 }
 
 DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map, choose_args_index,
@@ -1979,11 +1983,13 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, bool msgr2,
 			   sizeof(u64) + sizeof(u32), e_inval);
 	ceph_decode_copy(p, &fsid, sizeof(fsid));
 	epoch = ceph_decode_32(p);
-	BUG_ON(epoch != map->epoch+1);
 	ceph_decode_copy(p, &modified, sizeof(modified));
 	new_pool_max = ceph_decode_64(p);
 	new_flags = ceph_decode_32(p);
 
+	if (epoch != map->epoch + 1)
+		goto e_inval;
+
 	/* full map? */
 	ceph_decode_32_safe(p, end, len, e_inval);
 	if (len > 0) {