Skip to content

Commit 858c697

Browse files
committed
std: make RwLock test less intensive
This test called `yield` 80,000 times, which is nothing on a system with little load, but murder on a CI system. macOS' scheduler in particular doesn't seem to deal with this very well. The `yield` calls also weren't even necessarily doing what they were meant to: if the optimizer could figure out that it doesn't clobber some memory, then it could happily reorder around the `yield`s anyway! The test has been simplified and made to work better, and the number of yields has been reduced. The number of overall iterations has also been reduced, because with the `yield` calls making races very likely, we don't really need to run too many iterations to be confident that the implementation is race-free.
1 parent b312da1 commit 858c697

File tree

1 file changed

+56
-51
lines changed

1 file changed

+56
-51
lines changed

lib/std/Thread/RwLock.zig

Lines changed: 56 additions & 51 deletions
Original file line numberDiff line numberDiff line change
@@ -301,81 +301,86 @@ test "concurrent access" {
301301

302302
const num_writers: usize = 2;
303303
const num_readers: usize = 4;
304-
const num_writes: usize = 4096;
305-
const num_reads: usize = 8192;
304+
const num_writes: usize = 1000;
305+
const num_reads: usize = 2000;
306306

307307
const Runner = struct {
308-
const Self = @This();
308+
const Runner = @This();
309309

310-
rwl: RwLock = .{},
311-
writes: usize = 0,
312-
reads: std.atomic.Value(usize) = std.atomic.Value(usize).init(0),
310+
rwl: RwLock,
311+
writes: usize,
312+
reads: std.atomic.Value(usize),
313313

314-
term1: usize = 0,
315-
term2: usize = 0,
316-
term_sum: usize = 0,
314+
val_a: usize,
315+
val_b: usize,
317316

318-
fn reader(self: *Self) !void {
317+
fn reader(run: *Runner, thread_idx: usize) !void {
318+
var prng = std.Random.DefaultPrng.init(thread_idx);
319+
const rnd = prng.random();
319320
while (true) {
320-
self.rwl.lockShared();
321-
defer self.rwl.unlockShared();
321+
run.rwl.lockShared();
322+
defer run.rwl.unlockShared();
322323

323-
if (self.writes >= num_writes or self.reads.load(.unordered) >= num_reads)
324-
break;
324+
try testing.expect(run.writes <= num_writes);
325+
if (run.reads.fetchAdd(1, .monotonic) >= num_reads) break;
325326

326-
try self.check();
327+
// We use `volatile` accesses so that we can make sure the memory is accessed either
328+
// side of a yield, maximising chances of a race.
329+
const a_ptr: *const volatile usize = &run.val_a;
330+
const b_ptr: *const volatile usize = &run.val_b;
327331

328-
_ = self.reads.fetchAdd(1, .monotonic);
332+
const old_a = a_ptr.*;
333+
if (rnd.boolean()) try std.Thread.yield();
334+
const old_b = b_ptr.*;
335+
try testing.expect(old_a == old_b);
329336
}
330337
}
331338

332-
fn writer(self: *Self, thread_idx: usize) !void {
339+
fn writer(run: *Runner, thread_idx: usize) !void {
333340
var prng = std.Random.DefaultPrng.init(thread_idx);
334-
var rnd = prng.random();
335-
341+
const rnd = prng.random();
336342
while (true) {
337-
self.rwl.lock();
338-
defer self.rwl.unlock();
343+
run.rwl.lock();
344+
defer run.rwl.unlock();
339345

340-
if (self.writes >= num_writes)
341-
break;
346+
try testing.expect(run.writes <= num_writes);
347+
if (run.writes == num_writes) break;
342348

343-
try self.check();
349+
// We use `volatile` accesses so that we can make sure the memory is accessed either
350+
// side of a yield, maximising chances of a race.
351+
const a_ptr: *volatile usize = &run.val_a;
352+
const b_ptr: *volatile usize = &run.val_b;
344353

345-
const term1 = rnd.int(usize);
346-
self.term1 = term1;
347-
try std.Thread.yield();
354+
const new_val = rnd.int(usize);
348355

349-
const term2 = rnd.int(usize);
350-
self.term2 = term2;
351-
try std.Thread.yield();
356+
const old_a = a_ptr.*;
357+
a_ptr.* = new_val;
358+
if (rnd.boolean()) try std.Thread.yield();
359+
const old_b = b_ptr.*;
360+
b_ptr.* = new_val;
361+
try testing.expect(old_a == old_b);
352362

353-
self.term_sum = term1 +% term2;
354-
self.writes += 1;
363+
run.writes += 1;
355364
}
356365
}
357-
358-
fn check(self: *const Self) !void {
359-
const term_sum = self.term_sum;
360-
try std.Thread.yield();
361-
362-
const term2 = self.term2;
363-
try std.Thread.yield();
364-
365-
const term1 = self.term1;
366-
try testing.expectEqual(term_sum, term1 +% term2);
367-
}
368366
};
369367

370-
var runner = Runner{};
371-
var threads: [num_writers + num_readers]std.Thread = undefined;
372-
373-
for (threads[0..num_writers], 0..) |*t, i| t.* = try std.Thread.spawn(.{}, Runner.writer, .{ &runner, i });
374-
for (threads[num_writers..]) |*t| t.* = try std.Thread.spawn(.{}, Runner.reader, .{&runner});
368+
var run: Runner = .{
369+
.rwl = .{},
370+
.writes = 0,
371+
.reads = .init(0),
372+
.val_a = 0,
373+
.val_b = 0,
374+
};
375+
var write_threads: [num_writers]std.Thread = undefined;
376+
var read_threads: [num_readers]std.Thread = undefined;
375377

376-
for (threads) |t| t.join();
378+
for (&write_threads, 0..) |*t, i| t.* = try .spawn(.{}, Runner.writer, .{ &run, i });
379+
for (&read_threads, num_writers..) |*t, i| t.* = try .spawn(.{}, Runner.reader, .{ &run, i });
377380

378-
try testing.expectEqual(num_writes, runner.writes);
381+
for (write_threads) |t| t.join();
382+
for (read_threads) |t| t.join();
379383

380-
//std.debug.print("reads={}\n", .{ runner.reads.load(.unordered)});
384+
try testing.expect(run.writes == num_writes);
385+
try testing.expect(run.reads.raw >= num_reads);
381386
}

0 commit comments

Comments
 (0)