gterzian: Do
GitHub Copilot: To run the program, provide the number of images `N` and the number of threads `NUM_THREADS` as command-line arguments.

Usage: `cargo run -- <N> <NUM_THREADS>`

Example: to process 10 images with 4 threads, run `cargo run -- 10 4`.

```rust
use std::collections::HashMap;
use std::env;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
// CONSTANT N
// Note: N is provided as a command-line argument.
/// Image == 1..N
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Image(usize);
/// ImageState == {"None", "Loaded"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum ImageState {
#[default]
None,
Loaded,
}
/// TypeOk == /\ image_states \in [Image -> ImageState]
/// /\ keys_used \in Int
struct SharedState {
/// image_states \in [Image -> ImageState]
image_states: HashMap<Image, ImageState>,
/// keys_used \in Int
keys_used: u32,
}
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
eprintln!("Usage: cargo run -- <N> <NUM_THREADS>");
std::process::exit(1);
}
let n: usize = args[1].parse().expect("N must be a positive integer");
let num_threads: usize = args[2]
.parse()
.expect("NUM_THREADS must be a positive integer");
// Image == 1..N
let images: Vec<Image> = (1..=n).map(Image).collect();
// Init
let initial_state = {
// /\ image_states = [i \in Image |-> "None"]
let image_states = images.iter().map(|&i| (i, ImageState::default())).collect();
// /\ keys_used = 0
let keys_used = 0;
SharedState {
image_states,
keys_used,
}
};
let lock = Arc::new(Mutex::new(initial_state));
let cvar = Arc::new(Condvar::new());
let mut handles = vec![];
for _ in 0..num_threads {
let lock_clone = Arc::clone(&lock);
let cvar_clone = Arc::clone(&cvar);
let images_clone = images.clone();
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// BlockingSpec == Init /\ [][Next]_<<image_states, keys_used>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
{
// Note: The system stops when the Done action is the only one possible.
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ LoadImage(i)
let mut action_taken = false;
// \E i \in Image
for &image in images_clone.iter() {
// LoadImage(i)
// /\ image_states[i] = "None"
if guard.image_states.get(&image) == Some(&ImageState::None) {
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image, ImageState::Loaded);
action_taken = true;
// Note: A thread performs only one action per iteration before waiting.
break;
}
}
if action_taken {
cvar_clone.notify_all();
}
guard = cvar_clone.wait(guard).unwrap();
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("All images loaded successfully.");
let final_state = lock.lock().unwrap();
assert_eq!(
final_state.keys_used, n as u32,
"Final keys_used should be N"
);
assert!(
final_state
.image_states
.values()
.all(|&s| s == ImageState::Loaded),
"All images should be in the 'Loaded' state"
);
}
```
gterzian: Now, using `ImageCacheTwo.tla` as a design document, write a second iteration of the system according to the same guidelines in `prompt.md`.
GitHub Copilot: To run the program, provide the number of images `N` and the number of threads `NUM_THREADS` as command-line arguments.

Usage: `cargo run -- <N> <NUM_THREADS>`

Example: to process 10 images with 4 threads, run `cargo run -- 10 4`.

```rust
use std::collections::HashMap;
use std::env;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
// CONSTANT N
// Note: N is provided as a command-line argument.
/// Image == 1..N
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Image(usize);
/// ImageState == {"None", "PendingKey", "HasKey", "Loaded"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum ImageState {
#[default]
None,
PendingKey,
HasKey,
Loaded,
}
/// keys \in {"Empty", "Generated"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum Keys {
#[default]
Empty,
Generated,
}
/// TypeOk == /\ image_states \in [Image -> ImageState]
/// /\ keys_used \in Int
/// /\ keys \in {"Empty", "Generated"}
struct SharedState {
/// image_states \in [Image -> ImageState]
image_states: HashMap<Image, ImageState>,
/// keys_used \in Int
keys_used: u32,
/// keys \in {"Empty", "Generated"}
keys: Keys,
}
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
eprintln!("Usage: cargo run -- <N> <NUM_THREADS>");
std::process::exit(1);
}
let n: usize = args[1].parse().expect("N must be a positive integer");
let num_threads: usize = args[2]
.parse()
.expect("NUM_THREADS must be a positive integer");
// Image == 1..N
let images: Vec<Image> = (1..=n).map(Image).collect();
// Init
let initial_state = {
// /\ image_states = [i \in Image |-> "None"]
let image_states = images.iter().map(|&i| (i, ImageState::default())).collect();
// /\ keys_used = 0
let keys_used = 0;
// /\ keys = "Empty"
let keys = Keys::default();
SharedState {
image_states,
keys_used,
keys,
}
};
let lock = Arc::new(Mutex::new(initial_state));
let cvar = Arc::new(Condvar::new());
let mut handles = vec![];
for _ in 0..num_threads {
let lock_clone = Arc::clone(&lock);
let cvar_clone = Arc::clone(&cvar);
let images_clone = images.clone();
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// NonBlockingSpec == Init /\ [][Next]_<<image_states, keys_used, keys>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
{
// Note: The system stops when the Done action is the only one possible.
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ StartLoad(i)
// \/ SetKey(i)
// \/ FinishLoad(i)
// \/ Done
// \/ GenerateKey
let mut action_taken = false;
// GenerateKey
// /\ \E i \in Image: image_states[i] = "PendingKey"
if guard
.image_states
.values()
.any(|&s| s == ImageState::PendingKey)
{
// /\ keys = "Empty"
if guard.keys == Keys::Empty {
// /\ keys' = "Generated"
guard.keys = Keys::Generated;
action_taken = true;
}
}
if !action_taken {
// \E i \in Image
for &image in images_clone.iter() {
let current_state = *guard.image_states.get(&image).unwrap();
match current_state {
ImageState::None => {
// StartLoad(i)
// /\ image_states[i] = "None"
// /\ \A ii \in Image: image_states[ii] # "PendingKey"
if !guard
.image_states
.values()
.any(|&s| s == ImageState::PendingKey)
{
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
action_taken = true;
}
}
ImageState::PendingKey => {
// SetKey(i)
// /\ image_states[i] = "PendingKey"
// /\ keys = "Generated"
if guard.keys == Keys::Generated {
// /\ keys' = "Empty"
guard.keys = Keys::Empty;
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "HasKey"]
guard.image_states.insert(image, ImageState::HasKey);
action_taken = true;
}
}
ImageState::HasKey => {
// FinishLoad(i)
// /\ image_states[i] = "HasKey"
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image, ImageState::Loaded);
action_taken = true;
}
ImageState::Loaded => {}
}
if action_taken {
// Note: A thread performs only one action per iteration before waiting.
break;
}
}
}
if action_taken {
cvar_clone.notify_all();
}
guard = cvar_clone.wait(guard).unwrap();
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("Program ran successfully and now stops.");
let final_state = lock.lock().unwrap();
assert_eq!(
final_state.keys_used, n as u32,
"Final keys_used should be N"
);
assert!(
final_state
.image_states
.values()
.all(|&s| s == ImageState::Loaded),
"All images should be in the 'Loaded' state"
);
// Note: When all images are loaded, the last action on `keys` must have been
// `SetKey`, which sets it to `Empty`. No other action can change it after that.
assert_eq!(
final_state.keys,
Keys::Empty,
"Final keys should be Empty"
);
}
```
gterzian: Now, using `ImageCacheThree.tla`, but ignoring `keys_batch`, as a design document, write another iteration of the system according to the same guidelines in `prompt.md`.
GitHub Copilot: To run the program, provide the number of images `N` and the number of threads `NUM_THREADS` as command-line arguments.

Usage: `cargo run -- <N> <NUM_THREADS>`

Example: to process 10 images with 4 threads, run `cargo run -- 10 4`.

```rust
use std::collections::{HashMap, VecDeque};
use std::env;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
// CONSTANT N
// Note: N is provided as a command-line argument.
/// Image == 1..N
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Image(usize);
/// ImageState == {"None", "PendingKey", "HasKey", "Loaded"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum ImageState {
#[default]
None,
PendingKey,
HasKey,
Loaded,
}
// Note: Represents a "Generated" key from the spec.
#[derive(Debug)]
struct Key;
/// TypeOk == /\ image_states \in [Image -> ImageState]
/// /\ image_queue \in Seq(Image)
/// /\ keys_used \in Int
/// /\ keys \in Seq({"Generated"})
/// /\ pending_keys \in [Thread -> BOOLEAN]
struct SharedState {
/// image_states \in [Image -> ImageState]
image_states: HashMap<Image, ImageState>,
/// image_queue \in Seq(Image)
image_queue: VecDeque<Image>,
/// keys_used \in Int
keys_used: u32,
/// keys \in Seq({"Generated"})
keys: VecDeque<Key>,
// Note: This single boolean represents the state of `pending_keys`.
// `false` means `\A t \in Thread: pending_keys[t] = FALSE`.
// `true` means `\E t \in Thread: pending_keys[t] = TRUE`.
// This is necessary because threads are anonymous in this implementation.
/// pending_keys \in [Thread -> BOOLEAN]
pending_keys: bool,
}
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
eprintln!("Usage: cargo run -- <N> <NUM_THREADS>");
std::process::exit(1);
}
let n: usize = args[1].parse().expect("N must be a positive integer");
let num_threads: usize = args[2]
.parse()
.expect("NUM_THREADS must be a positive integer");
// Image == 1..N
let images: Vec<Image> = (1..=n).map(Image).collect();
// Note: The spec defines `Thread == 1..N`, which would tie the number of threads to the
// number of images `N`. The prompt requires the number of threads to be configurable
// independently. The implementation of `pending_keys` with a single boolean accommodates
// this requirement by abstracting away thread identities.
// Init
let initial_state = {
// /\ image_states = [i \in Image |-> "None"]
let image_states = images.iter().map(|&i| (i, ImageState::default())).collect();
// /\ image_queue = <<>>
let image_queue = VecDeque::new();
// /\ keys_used = 0
let keys_used = 0;
// /\ keys = <<>>
let keys = VecDeque::new();
// /\ pending_keys = [t \in Thread |-> FALSE]
let pending_keys = false;
SharedState {
image_states,
image_queue,
keys_used,
keys,
pending_keys,
}
};
let lock = Arc::new(Mutex::new(initial_state));
let cvar = Arc::new(Condvar::new());
let mut handles = vec![];
for _ in 0..num_threads {
let lock_clone = Arc::clone(&lock);
let cvar_clone = Arc::clone(&cvar);
let images_clone = images.clone();
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// BatchingSpec == Init /\ [][Next]_<<...>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
{
// Note: The system stops when the Done action is the only one possible.
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ StartLoad(i)
// \/ FinishLoad(i)
// \/ DequeImage(i)
// \/ SetKey(i)
// \/ Done
// \/ \E t \in Thread:
// \/ StartKeyGeneration(t)
// \/ GenerateKeys(t)
let mut action_taken = false;
// StartKeyGeneration(t)
// /\ \A tt \in Thread: pending_keys[tt] = FALSE
if !guard.pending_keys {
// /\ pending_keys' = [pending_keys EXCEPT ![t] = TRUE]
guard.pending_keys = true;
action_taken = true;
}
if !action_taken {
// GenerateKeys(t)
// /\ pending_keys[t] = TRUE
if guard.pending_keys {
// LET keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
// LET keys_needed == keys_requested - Len(keys)
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
// LET batch == [i \in 1..keys_needed |-> "Generated"]
let batch: VecDeque<Key> = (0..keys_needed).map(|_| Key).collect();
// /\ keys' = keys \o batch
guard.keys.extend(batch);
// /\ pending_keys' = [pending_keys EXCEPT ![t] = FALSE]
guard.pending_keys = false;
action_taken = true;
}
}
if !action_taken {
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
match state_at_head {
ImageState::PendingKey => {
// SetKey(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "PendingKey"
// /\ Len(keys) > 0
if !guard.keys.is_empty() {
// /\ keys' = Tail(keys)
guard.keys.pop_front();
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "HasKey"]
guard.image_states.insert(image_at_head, ImageState::HasKey);
action_taken = true;
}
}
ImageState::HasKey => {
// FinishLoad(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "HasKey"
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image_at_head, ImageState::Loaded);
action_taken = true;
}
ImageState::Loaded => {
// DequeImage(i)
// /\ Len(image_queue) > 0
// /\ Head(image_queue) = i
// /\ image_states[i] = "Loaded"
// /\ image_queue' = Tail(image_queue)
guard.image_queue.pop_front();
// /\ keys' = <<>>
guard.keys.clear();
action_taken = true;
}
ImageState::None => {}
}
}
}
if !action_taken {
// \E i \in Image
for &image in images_clone.iter() {
// StartLoad(i)
// /\ image_states[i] = "None"
if *guard.image_states.get(&image).unwrap() == ImageState::None {
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
// /\ image_queue' = Append(image_queue, i)
guard.image_queue.push_back(image);
action_taken = true;
// Note: A thread performs only one action per iteration before waiting.
break;
}
}
}
if action_taken {
cvar_clone.notify_all();
}
guard = cvar_clone.wait(guard).unwrap();
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("Program ran successfully and now stops.");
let final_state = lock.lock().unwrap();
assert_eq!(
final_state.keys_used, n as u32,
"Final keys_used should be N"
);
assert!(
final_state
.image_states
.values()
.all(|&s| s == ImageState::Loaded),
"All images should be in the 'Loaded' state"
);
assert!(
final_state.image_queue.is_empty(),
"Final image_queue should be empty"
);
assert!(final_state.keys.is_empty(), "Final keys should be empty");
assert!(
!final_state.pending_keys,
"Final pending_keys should be false"
);
}
```
gterzian: The `action_taken` variable creates a livelock, because the program will keep alternating on `guard.pending_keys`.

I have updated `prompt.md` to allow for more than one action taken per thread iteration, which should allow you to fix the livelock.
GitHub Copilot: I've fixed the livelock by combining the `StartKeyGeneration` and `GenerateKeys` actions. A thread that begins the key generation process will now complete it within the same turn, preventing other threads from interfering and causing a livelock. This change is permitted by the updated guidelines allowing more than one action per thread iteration.

```rust
// ...existing code...
// \/ StartKeyGeneration(t)
// \/ GenerateKeys(t)
let mut action_taken = false;
// Note: To fix a livelock, StartKeyGeneration and GenerateKeys are combined.
// A thread that starts generating keys will complete the process in the same turn.
if !guard.pending_keys {
// This corresponds to StartKeyGeneration(t)
guard.pending_keys = true;
// This corresponds to GenerateKeys(t)
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
if keys_needed > 0 {
// LET batch == [i \in 1..keys_needed |-> "Generated"]
let batch: VecDeque<Key> = (0..keys_needed).map(|_| Key).collect();
// /\ keys' = keys \o batch
guard.keys.extend(batch);
action_taken = true;
}
guard.pending_keys = false;
}
if !action_taken {
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
// ...existing code...
```
gterzian: The current implementation does not follow the spec, in that the implementation stops when the equivalent of the `Done` action is enabled. But in the prompt I wrote that the system should stop when it is in a state corresponding to infinite stuttering steps of an action such as `Done`, whereas the implementation stops while there are still images in the queue, which means we are not in an infinite-stuttering state, because `DequeImage` is still enabled.

Please fix it.
GitHub Copilot: You are correct. The previous implementation did not correctly check for the terminal state, as `DequeImage` could still be enabled. I have adjusted the termination condition to ensure the system only stops when no more actions are possible.

```rust
// ...existing code...
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// BatchingSpec == Init /\ [][Next]_<<...>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
&& guard.image_queue.is_empty()
{
// Note: The system stops when the Done action is the only one possible.
// This requires the image_queue to be empty, otherwise DequeImage would
// still be enabled.
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ StartLoad(i)
// \/ FinishLoad(i)
// ...existing code...
```
gterzian: I have updated the spec to only generate a batch if needed; please update the code.
GitHub Copilot: Based on the updated `ImageCacheThree.tla` spec, I have revised the implementation to follow the logic of separate `StartKeyGeneration` and `GenerateKeys` actions. The key generation process is now initiated only if keys are actually required.

```rust
// ...existing code...
// \/ \E t \in Thread:
// \/ StartKeyGeneration(t)
// \/ GenerateKeys(t)
let mut action_taken = false;
// StartKeyGeneration(t)
// /\ \A tt \in Thread: pending_keys[tt] = FALSE
if !guard.pending_keys {
// LET keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
// LET keys_needed == keys_requested - Len(keys)
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
// /\ pending_keys' = [pending_keys EXCEPT ![t] = keys_needed > 0]
if keys_needed > 0 {
guard.pending_keys = true;
action_taken = true;
}
}
if !action_taken {
// GenerateKeys(t)
// /\ pending_keys[t] = TRUE
if guard.pending_keys {
// LET keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
// LET keys_needed == keys_requested - Len(keys)
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
// LET batch == [i \in 1..keys_needed |-> "Generated"]
let batch: VecDeque<Key> = (0..keys_needed).map(|_| Key).collect();
// /\ keys' = keys \o batch
guard.keys.extend(batch);
// /\ pending_keys' = [pending_keys EXCEPT ![t] = FALSE]
guard.pending_keys = false;
action_taken = true;
}
}
if !action_taken {
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
match state_at_head {
// ...existing code...
```
gterzian: I believe the spec allows the batch of keys to be generated outside of the critical section, because this only requires the `keys_needed` variable, which is computed from the shared state as part of `StartKeyGeneration`; after that, the program can exit the critical section, generate the batch, and then re-enter the critical section to perform the postconditions of `GenerateKeys`. This does require that `GenerateKeys` be performed by the same thread that set `pending_keys` to true.

If you agree that the spec allows for this, please make the change. If you disagree, do not make changes but explain your reasoning to me.
GitHub Copilot: I disagree with making the proposed change. Here is my reasoning:

The TLA+ spec models `StartKeyGeneration(t)` and `GenerateKeys(t)` as two distinct, atomic actions, where `t` identifies the specific thread performing the action. The current implementation, following the guidelines in `prompt.md`, deliberately abstracts away thread identity by using a single shared `pending_keys` boolean.

Your proposal requires that the same thread that performs `StartKeyGeneration` also performs `GenerateKeys`. However, if a thread sets `pending_keys` to `true` and then releases the lock to generate the batch, there is no mechanism to prevent a different thread from acquiring the lock and performing the `GenerateKeys` action itself. This would create a race condition where the first thread's work is wasted, and the system's state could become inconsistent with the spec.

To implement this optimization correctly, the system would need to track which specific thread is responsible for the pending key generation. This would require adding state variables not present in the TLA+ spec, which would violate the project guidelines.

Therefore, to ensure correctness and adhere to the established implementation pattern, the `GenerateKeys` action must be treated as an atomic transaction performed entirely within a single critical section.
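To make the race concrete, here is a minimal, self-contained sketch of the problematic interleaving. The `State` struct, the string stand-in for keys, and the sleep-based timing are illustrative assumptions only, not part of the implementation above:

```rust
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

// Hypothetical, simplified shared state: just the anonymous flag and the keys.
struct State {
    pending_keys: bool,
    keys: Vec<&'static str>,
}

fn main() {
    let state = Arc::new(Mutex::new(State { pending_keys: false, keys: Vec::new() }));

    // Thread A: performs StartKeyGeneration, then leaves the critical
    // section to "generate" the batch before performing GenerateKeys.
    let a = {
        let state = Arc::clone(&state);
        thread::spawn(move || {
            state.lock().unwrap().pending_keys = true; // StartKeyGeneration
            thread::sleep(Duration::from_millis(10)); // batch generation, lock released
            let mut s = state.lock().unwrap();
            s.keys.push("Generated"); // GenerateKeys (possibly duplicated, see below)
            s.pending_keys = false;
        })
    };

    // Thread B: sees pending_keys == true and, with no notion of which
    // thread set it, legitimately performs GenerateKeys itself.
    let b = {
        let state = Arc::clone(&state);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(5));
            let mut s = state.lock().unwrap();
            if s.pending_keys {
                s.keys.push("Generated");
                s.pending_keys = false;
            }
        })
    };

    a.join().unwrap();
    b.join().unwrap();
    // With this timing, both threads usually run GenerateKeys: two keys
    // are produced where the spec allows only one batch.
    println!("keys generated: {}", state.lock().unwrap().keys.len());
}
```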
gterzian: I agree, but I also believe that the main reason for not making the change is not the constraints of the spec, but the constraints of the prompt, which forbid assigning identities to threads. So I have now updated the prompt, and I ask you to look at the spec again in that light, and make the changes I proposed above (including changes adding a concept of thread identity, if needed) if you now think that is allowed; if not, explain to me why.
GitHub Copilot: You are correct that the updated prompt removes the previous constraint on thread identity. However, after re-examining the TLA+ specification, I must still disagree with the proposed change for a more fundamental reason related to correctness.

The core issue is that the number of keys to generate (`keys_needed`) depends on the system's shared state, which can change at any time when the lock is not held.
Consider the proposed execution flow:

1. A thread acquires the lock.
2. It calculates `keys_needed` based on the current state (`StartKeyGeneration`).
3. It releases the lock to generate the batch of keys.
4. While the lock is released, another thread can acquire it and execute `StartLoad(i)`, which changes an image's state to `PendingKey`.
5. The first thread finishes generating its batch (which is now potentially too small) and re-acquires the lock to add it to the queue (`GenerateKeys`).
At this point, the state has changed, and the `keys_needed` value calculated in step 2 is stale. The `GenerateKeys(t)` action in the TLA+ spec is defined as an atomic step. Its `LET` clause re-calculates `keys_needed` based on the state at the moment the action is taken:
```tla
GenerateKeys(t) ==
    LET
        keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
        keys_needed == keys_requested - Len(keys)
        batch == [i \in 1..keys_needed |-> "Generated"]
    IN
        ...
        /\ keys' = keys \o batch
        ...
```
This atomicity is crucial. Moving the batch generation outside the critical section would break it, creating a race condition where the size of the generated batch does not correctly reflect the current needs of the system, violating the spec.

For this reason, the calculation of `keys_needed` and the extension of the `keys` queue must be performed together within a single critical section to ensure correctness. Therefore, I will not be making the proposed change.
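As an illustration of the staleness, here is a minimal, self-contained sketch; the `State` fields and the sleep-based scheduling are hypothetical, chosen only to make the interleaving likely:

```rust
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::Duration;

// Hypothetical, simplified shared state: counts instead of the real maps/queues.
struct State {
    pending_images: usize, // images in the "PendingKey" state
    keys: usize,           // Len(keys)
}

fn main() {
    let state = Arc::new(Mutex::new(State { pending_images: 1, keys: 0 }));

    // Generator thread: snapshots keys_needed, releases the lock to
    // generate the batch, then publishes a batch sized from the snapshot.
    let generator = {
        let state = Arc::clone(&state);
        thread::spawn(move || {
            let keys_needed = {
                let s = state.lock().unwrap();
                s.pending_images - s.keys // StartKeyGeneration: 1 - 0 = 1
            }; // lock released here
            thread::sleep(Duration::from_millis(10)); // simulated batch generation
            state.lock().unwrap().keys += keys_needed; // GenerateKeys with a stale size
        })
    };

    // Loader thread: performs StartLoad(i) while the generator is outside
    // the critical section, adding a second "PendingKey" image.
    let loader = {
        let state = Arc::clone(&state);
        thread::spawn(move || {
            thread::sleep(Duration::from_millis(5));
            state.lock().unwrap().pending_images += 1;
        })
    };

    generator.join().unwrap();
    loader.join().unwrap();
    let s = state.lock().unwrap();
    // With this timing, this usually prints "pending: 2, keys: 1":
    // the batch no longer covers the demand that arrived in the meantime.
    println!("pending: {}, keys: {}", s.pending_images, s.keys);
}
```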
gterzian: Thank you for pointing this out. I have now updated the spec; please reconsider.
GitHub Copilot: You are right to point out that the updated specification now correctly models the separation of concerns, allowing the key generation to occur outside the critical section. The new `pending_keys` and `keys_batch` variables effectively create a contract that ensures the race condition I previously described is avoided.

I have updated the implementation to reflect this new design.

To run the program, provide the number of images `N` and the number of threads `NUM_THREADS` as command-line arguments.

Usage: `cargo run -- <N> <NUM_THREADS>`

Example: to process 10 images with 4 threads, run `cargo run -- 10 4`.

```rust
use std::collections::{HashMap, VecDeque};
use std::env;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
// CONSTANT N
// Note: N is provided as a command-line argument.
/// Image == 1..N
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Image(usize);
/// ImageState == {"None", "PendingKey", "HasKey", "Loaded"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum ImageState {
#[default]
None,
PendingKey,
HasKey,
Loaded,
}
// Note: Represents a "Generated" key from the spec.
#[derive(Debug)]
struct Key;
/// TypeOk == /\ image_states \in [Image -> ImageState]
/// /\ image_queue \in Seq(Image)
/// /\ keys_used \in Int
/// /\ keys \in Seq({"Generated"})
/// /\ keys_batch \in BOOLEAN
/// /\ pending_keys \in [Thread -> Int]
struct SharedState {
/// image_states \in [Image -> ImageState]
image_states: HashMap<Image, ImageState>,
/// image_queue \in Seq(Image)
image_queue: VecDeque<Image>,
/// keys_used \in Int
keys_used: u32,
/// keys \in Seq({"Generated"})
keys: VecDeque<Key>,
/// keys_batch \in BOOLEAN
keys_batch: bool,
// Note: This maps a thread's ID to the number of keys it is pending,
// implementing `[Thread -> Int]`.
/// pending_keys \in [Thread -> Int]
pending_keys: HashMap<usize, usize>,
}
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
eprintln!("Usage: cargo run -- <N> <NUM_THREADS>");
std::process::exit(1);
}
let n: usize = args[1].parse().expect("N must be a positive integer");
let num_threads: usize = args[2]
.parse()
.expect("NUM_THREADS must be a positive integer");
// Image == 1..N
let images: Vec<Image> = (1..=n).map(Image).collect();
// Init
let initial_state = {
// /\ image_states = [i \in Image |-> "None"]
let image_states = images.iter().map(|&i| (i, ImageState::default())).collect();
// /\ image_queue = <<>>
let image_queue = VecDeque::new();
// /\ keys_used = 0
let keys_used = 0;
// /\ keys = <<>>
let keys = VecDeque::new();
// /\ keys_batch = FALSE
let keys_batch = false;
// /\ pending_keys = [t \in Thread |-> 0]
let pending_keys = HashMap::new();
SharedState {
image_states,
image_queue,
keys_used,
keys,
keys_batch,
pending_keys,
}
};
let lock = Arc::new(Mutex::new(initial_state));
let cvar = Arc::new(Condvar::new());
let mut handles = vec![];
for id in 0..num_threads {
let lock_clone = Arc::clone(&lock);
let cvar_clone = Arc::clone(&cvar);
let images_clone = images.clone();
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// BatchingSpec == Init /\ [][Next]_<<...>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
&& guard.image_queue.is_empty()
{
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ StartLoad(i)
// \/ FinishLoad(i)
// \/ DequeImage(i)
// \/ SetKey(i)
// \/ Done
// \/ \E t \in Thread:
// \/ StartKeyGeneration(t)
// \/ GenerateKeys(t)
let mut action_taken = false;
let mut keys_to_generate: Option<usize> = None;
// StartKeyGeneration(t)
// /\ \A tt \in Thread: pending_keys[tt] = 0
if guard.pending_keys.is_empty() {
// LET keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
// LET keys_needed == keys_requested - Len(keys)
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
if keys_needed > 0 {
// /\ pending_keys' = [pending_keys EXCEPT ![t] = keys_needed]
guard.pending_keys.insert(id, keys_needed);
// /\ keys_batch' = TRUE
guard.keys_batch = true;
keys_to_generate = Some(keys_needed);
action_taken = true;
}
}
if let Some(needed) = keys_to_generate {
// Note: The key generation is performed outside the critical section,
// as allowed by the updated spec.
drop(guard);
// LET batch == [i \in 1..pending_keys[t] |-> "Generated"]
let batch: VecDeque<Key> = (0..needed).map(|_| Key).collect();
guard = lock_clone.lock().unwrap();
// GenerateKeys(t)
// /\ pending_keys[t] > 0
if guard.pending_keys.get(&id) == Some(&needed) {
// /\ keys' = keys \o batch
guard.keys.extend(batch);
// /\ pending_keys' = [pending_keys EXCEPT ![t] = 0]
guard.pending_keys.remove(&id);
}
} else {
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
match state_at_head {
ImageState::PendingKey => {
// SetKey(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "PendingKey"
// /\ Len(keys) > 0
// /\ keys_batch = TRUE
if !guard.keys.is_empty() && guard.keys_batch {
// /\ keys' = Tail(keys)
guard.keys.pop_front();
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "HasKey"]
guard.image_states.insert(image_at_head, ImageState::HasKey);
action_taken = true;
}
}
ImageState::HasKey => {
// FinishLoad(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "HasKey"
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image_at_head, ImageState::Loaded);
action_taken = true;
}
ImageState::Loaded => {
// DequeImage(i)
// /\ Len(image_queue) > 0
// /\ Head(image_queue) = i
// /\ image_states[i] = "Loaded"
// /\ image_queue' = Tail(image_queue)
guard.image_queue.pop_front();
// /\ keys_batch' = FALSE
guard.keys_batch = false;
action_taken = true;
}
ImageState::None => {}
}
}
if !action_taken {
// \E i \in Image
for &image in images_clone.iter() {
// StartLoad(i)
// /\ image_states[i] = "None"
if *guard.image_states.get(&image).unwrap() == ImageState::None {
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
// /\ image_queue' = Append(image_queue, i)
guard.image_queue.push_back(image);
action_taken = true;
break;
}
}
}
}
if action_taken {
cvar_clone.notify_all();
}
guard = cvar_clone.wait(guard).unwrap();
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("Program ran successfully and now stops.");
let final_state = lock.lock().unwrap();
assert_eq!(
final_state.keys_used, n as u32,
"Final keys_used should be N"
);
assert!(
final_state
.image_states
.values()
.all(|&s| s == ImageState::Loaded),
"All images should be in the 'Loaded' state"
);
assert!(
final_state.image_queue.is_empty(),
"Final image_queue should be empty"
);
assert!(final_state.keys.is_empty(), "Final keys should be empty");
assert!(
!final_state.keys_batch,
"Final keys_batch should be false"
);
assert!(
final_state.pending_keys.is_empty(),
"Final pending_keys should be empty"
);
}
```
gterzian: Thank you, a couple of things to address:

- Please continue to ignore `keys_batch`, so remove it from the code.
- For clarity and consistency, use the newtype pattern for the thread id as well.
GitHub Copilot: Of course. I have updated the code to use the newtype pattern for thread IDs and have removed all logic related to `keys_batch`, as you requested.

To run the program, provide the number of images `N` and the number of threads `NUM_THREADS` as command-line arguments.

Usage: `cargo run -- <N> <NUM_THREADS>`

Example: to process 10 images with 4 threads, run `cargo run -- 10 4`.

```rust
use std::collections::{HashMap, VecDeque};
use std::env;
use std::sync::{Arc, Condvar, Mutex};
use std::thread;
// CONSTANT N
// Note: N is provided as a command-line argument.
/// Image == 1..N
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct Image(usize);
/// ThreadId == 1..NUM_THREADS
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
struct ThreadId(usize);
/// ImageState == {"None", "PendingKey", "HasKey", "Loaded"}
#[derive(PartialEq, Eq, Clone, Copy, Debug, Default)]
enum ImageState {
#[default]
None,
PendingKey,
HasKey,
Loaded,
}
// Note: Represents a "Generated" key from the spec.
#[derive(Debug)]
struct Key;
/// TypeOk == /\ image_states \in [Image -> ImageState]
/// /\ image_queue \in Seq(Image)
/// /\ keys_used \in Int
/// /\ keys \in Seq({"Generated"})
/// /\ pending_keys \in [Thread -> Int]
struct SharedState {
/// image_states \in [Image -> ImageState]
image_states: HashMap<Image, ImageState>,
/// image_queue \in Seq(Image)
image_queue: VecDeque<Image>,
/// keys_used \in Int
keys_used: u32,
/// keys \in Seq({"Generated"})
keys: VecDeque<Key>,
// Note: This maps a thread's ID to the number of keys it is pending,
// implementing `[Thread -> Int]`.
/// pending_keys \in [Thread -> Int]
pending_keys: HashMap<ThreadId, usize>,
}
fn main() {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
eprintln!("Usage: cargo run -- <N> <NUM_THREADS>");
std::process::exit(1);
}
let n: usize = args[1].parse().expect("N must be a positive integer");
let num_threads: usize = args[2]
.parse()
.expect("NUM_THREADS must be a positive integer");
// Image == 1..N
let images: Vec<Image> = (1..=n).map(Image).collect();
// Init
let initial_state = {
// /\ image_states = [i \in Image |-> "None"]
let image_states = images.iter().map(|&i| (i, ImageState::default())).collect();
// /\ image_queue = <<>>
let image_queue = VecDeque::new();
// /\ keys_used = 0
let keys_used = 0;
// /\ keys = <<>>
let keys = VecDeque::new();
// /\ pending_keys = [t \in Thread |-> 0]
let pending_keys = HashMap::new();
SharedState {
image_states,
image_queue,
keys_used,
keys,
pending_keys,
}
};
let lock = Arc::new(Mutex::new(initial_state));
let cvar = Arc::new(Condvar::new());
let mut handles = vec![];
for id_val in 0..num_threads {
let lock_clone = Arc::clone(&lock);
let cvar_clone = Arc::clone(&cvar);
let images_clone = images.clone();
let id = ThreadId(id_val);
let handle = thread::spawn(move || {
let mut guard = lock_clone.lock().unwrap();
// BatchingSpec == Init /\ [][Next]_<<...>>
loop {
// Done
// /\ \A ii \in Image: image_states[ii] = "Loaded"
if guard
.image_states
.values()
.all(|&s| s == ImageState::Loaded)
&& guard.image_queue.is_empty()
{
cvar_clone.notify_all();
break;
}
// Next == \/ \E i \in Image: \/ StartLoad(i)
// \/ FinishLoad(i)
// \/ DequeImage(i)
// \/ SetKey(i)
// \/ Done
// \/ \E t \in Thread:
// \/ StartKeyGeneration(t)
// \/ GenerateKeys(t)
let mut action_taken = false;
let mut keys_to_generate: Option<usize> = None;
// StartKeyGeneration(t)
// /\ \A tt \in Thread: pending_keys[tt] = 0
if guard.pending_keys.is_empty() {
// LET keys_requested == Cardinality({i \in Image: image_states[i] = "PendingKey"})
let keys_requested = guard
.image_states
.values()
.filter(|&&s| s == ImageState::PendingKey)
.count();
// LET keys_needed == keys_requested - Len(keys)
let keys_needed = keys_requested.saturating_sub(guard.keys.len());
if keys_needed > 0 {
// /\ pending_keys' = [pending_keys EXCEPT ![t] = keys_needed]
guard.pending_keys.insert(id, keys_needed);
keys_to_generate = Some(keys_needed);
action_taken = true;
}
}
if let Some(needed) = keys_to_generate {
// Note: The key generation is performed outside the critical section,
// as allowed by the updated spec.
drop(guard);
// LET batch == [i \in 1..pending_keys[t] |-> "Generated"]
let batch: VecDeque<Key> = (0..needed).map(|_| Key).collect();
guard = lock_clone.lock().unwrap();
// GenerateKeys(t)
// /\ pending_keys[t] > 0
if guard.pending_keys.get(&id) == Some(&needed) {
// /\ keys' = keys \o batch
guard.keys.extend(batch);
// /\ pending_keys' = [pending_keys EXCEPT ![t] = 0]
guard.pending_keys.remove(&id);
}
} else {
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
match state_at_head {
ImageState::PendingKey => {
// SetKey(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "PendingKey"
// /\ Len(keys) > 0
if !guard.keys.is_empty() {
// /\ keys' = Tail(keys)
guard.keys.pop_front();
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "HasKey"]
guard.image_states.insert(image_at_head, ImageState::HasKey);
action_taken = true;
}
}
ImageState::HasKey => {
// FinishLoad(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "HasKey"
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image_at_head, ImageState::Loaded);
action_taken = true;
}
ImageState::Loaded => {
// DequeImage(i)
// /\ Len(image_queue) > 0
// /\ Head(image_queue) = i
// /\ image_states[i] = "Loaded"
// /\ image_queue' = Tail(image_queue)
guard.image_queue.pop_front();
action_taken = true;
}
ImageState::None => {}
}
}
if !action_taken {
// \E i \in Image
for &image in images_clone.iter() {
// StartLoad(i)
// /\ image_states[i] = "None"
if *guard.image_states.get(&image).unwrap() == ImageState::None {
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
// /\ image_queue' = Append(image_queue, i)
guard.image_queue.push_back(image);
action_taken = true;
break;
}
}
}
}
if action_taken {
cvar_clone.notify_all();
}
guard = cvar_clone.wait(guard).unwrap();
}
});
handles.push(handle);
}
for handle in handles {
handle.join().unwrap();
}
println!("Program ran successfully and now stops.");
let final_state = lock.lock().unwrap();
assert_eq!(
final_state.keys_used, n as u32,
"Final keys_used should be N"
);
assert!(
final_state
.image_states
.values()
.all(|&s| s == ImageState::Loaded),
"All images should be in the 'Loaded' state"
);
assert!(
final_state.image_queue.is_empty(),
"Final image_queue should be empty"
);
assert!(final_state.keys.is_empty(), "Final keys should be empty");
assert!(
final_state.pending_keys.is_empty(),
"Final pending_keys should be empty"
);
}
```
gterzian: Please fix the markdown, and make the quoted text display better (not sure how, just use your own judgement).

GitHub Copilot: Of course. I have reformatted the Markdown for better readability and corrected the broken link.

```markdown
// ...existing code...
- The last few commits are manual edits, made after all the code generation, to fix some typos (mine) and naming issues (mine again, in the spec names) and make the material more presentable.
Steps used to get `main.rs` in [this state](https://github.com/gterzian/_refinement/blob/68666036d233045e885907f299d45c30ab10093e/src/main.rs):
1. Using GitHub Copilot chat, with Gemini 2.5 Pro as the model.
2. Attached `ImageCacheOne.tla` and `prompt.md` to the context.
3. Prompted: "Do". This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/4874109e8dcdba9605f35283ce85c497629bfa5a).
4. Attached `ImageCacheTwo.tla` to context, and prompted:
> Now, using `ImageCacheTwo.tla` as a design document, write a second iteration of the system according to the same guidelines in `prompt.md`.
This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/967a57bb7d5738df9b9acd9bb1fc208055dd74ce).
5. Attached `ImageCacheThree.tla` to context, and prompted:
> Now, using `ImageCacheThree.tla`, but ignoring `keys_batch`, as a design document, write another iteration of the system according to the same guidelines in prompt.md.
This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/de75ed47337ef1ed1fe269938da86b39eec8fefd).
6. Updated [`prompt.md`](https://github.com/gterzian/_refinement/commit/0ad3527f2e6f007720dd6e5c86c4af8fb10b672e) and prompted:
> The `action_taken` variable creates a livelock, because the program will keep alternating on `guard.pending_keys`.
>
> I have updated `prompt.md` to allow for more than one action taken per thread iteration, which should allow you to fix the livelock.
The edited prompt and resulting code are in [this commit](https://github.com/gterzian/_refinement/commit/0ad3527f2e6f007720dd6e5c86c4af8fb10b672e).
7. Prompted:
> The current implementation does not follow the spec, in that the implementation stops when the equivalent of the `Done` action is enabled, but in the prompt I wrote that the system should stop when it is in a state corresponding to infinite stuttering steps of an action such as `Done`. The implementation stops when there are still images in the queue, which means we are not in an infinite stuttering step state, because `DequeImage` is still enabled.
>
> Please fix it.
This came with an edit to the prompt (to emphasize "infinite") and resulted in a new iteration of the code, [here](https://github.com/gterzian/_refinement/commit/7303a6e7326be5059aeac0d789ced46066c28d53).
8. Updated the spec to [this state](https://github.com/gterzian/_refinement/commit/c7b02014ec3d18fdd8f8ff14dd0019001cdbbccb), and prompted:
> I have updated the spec to only generate a batch if needed; please update the code.
This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/c7b02014ec3d18fdd8f8ff14dd0019001cdbbccb).
9. Prompted:
> I believe the spec allows the batch of keys to be generated outside of the critical section, because this only requires the `keys_needed` variable, which is computed from the shared state as part of `StartKeyGeneration`, after which the program can exit the critical section, generate the batch, and then re-enter the critical section to perform the postconditions of `GenerateKeys`. This does require performing `GenerateKeys` by the same thread that set `pending_keys` to true.
>
> If you agree that the spec allows for this, please make the change. If you disagree, do not make changes but explain your reasoning to me.
This resulted in [this answer](https://gist.github.com/gterzian/611274c52017c7e20e9649e8088219ae).
10. Updated [`prompt.md`](https://github.com/gterzian/_refinement/blob/8889e43192bef5e70b3d0d6f35fc55af3c2b8b22/prompt.md), and prompted:
> I agree, but I also believe that the main reason for not making the change is not the constraints of the spec, but the constraints of the prompt, which forbid assigning identities to threads. So I have now updated the prompt, and I ask you to look at the spec again in that light, and make the changes I proposed above (including changes adding a concept of thread identity, if needed) if you now think that is allowed; if not, explain to me why.
This resulted in [this answer](https://gist.github.com/gterzian/0e1eb20a20a9a6e452683a4fea84ea63).
11. Updated [the spec](https://github.com/gterzian/_refinement/commit/8889e43192bef5e70b3d0d6f35fc55af3c2b8b22#diff-82972e806b99062c2fc570e87ea9663e6e5fa6d36369c199040dcb021c82527a), and prompted:
> Thank you for pointing this out. I have now updated the spec; please re-consider.
This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/8889e43192bef5e70b3d0d6f35fc55af3c2b8b22#diff-82972e806b99062c2fc570e87ea9663e6e5fa6d36369c199040dcb021c82527a), and [this answer](https://gist.github.com/gterzian/414ba8a3e88401e2f438214b0180ffae).
12. Prompted:
> Thank you, a couple of things to address:
>
> - Please continue to ignore `keys_batch`, so remove it from the code.
> - For clarity and consistency, use the newtype pattern for the thread id as well.
This resulted in [this iteration of the code](https://github.com/gterzian/_refinement/commit/68666036d233045e885907f299d45c30ab10093e).
```
gterzian: The current implementation seems to only process one image at a time, while the spec allows more than one image to be pending a key at the same time, with the goal of (on occasion) generating keys in batches of more than one.

If you agree that the spec allows for this, please change the logic accordingly.
GitHub Copilot: You are correct. The current implementation only ever puts one image into the `PendingKey` state at a time, because the `StartLoad` action is only performed for a single image before the thread waits. This prevents the system from ever requesting a batch of more than one key, which defeats the purpose of the batching logic in the specification.

I have updated the code to allow a thread to perform multiple `StartLoad` actions in a single turn. This will allow multiple images to enter the `PendingKey` state, enabling the system to generate key batches as intended by the spec.

```rust
// ...existing code...
if !action_taken {
// \E i \in Image
// Note: To allow for batching, a thread can perform multiple StartLoad
// actions in one turn. This allows multiple images to be in the
// PendingKey state, so that key generation can happen in batches.
for &image in images_clone.iter() {
// StartLoad(i)
// /\ image_states[i] = "None"
if *guard.image_states.get(&image).unwrap() == ImageState::None {
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
// /\ image_queue' = Append(image_queue, i)
guard.image_queue.push_back(image);
action_taken = true;
}
}
}
}
if action_taken {
// ...existing code...
```
gterzian: This means all images will always be loaded at the same time; I would rather see some non-determinism. How about removing the condition that no action has been taken, and just always performing `StartLoad(i)` if enabled (and breaking if it ran)?

Again, only do this if the spec allows it.
GitHub Copilot: You are correct. The current implementation imposes a strict order on actions that reduces non-determinism. A thread will only attempt to perform a `StartLoad` action if no other actions are available.

The TLA+ specification, however, defines the `Next` step as a disjunction (`\/`) of all possible actions. This means that at any given point, any enabled action can be chosen non-deterministically. The spec allows a `StartLoad` action to be taken even if other actions like `SetKey` or `FinishLoad` are also enabled.

To better reflect this non-determinism, I will modify the logic. I will remove the `if !action_taken` guard and the `else` block that currently prevent `StartLoad` from being considered when other actions are taken. Instead, the loop will now attempt to perform a `StartLoad` action on the first available image it finds; if successful, it will set `action_taken` and break from that specific loop, allowing other actions still to be considered within the same thread turn. This change increases the non-determinism of the system while still adhering to the specification.

```rust
// ...existing code...
// /\ pending_keys' = [pending_keys EXCEPT ![t] = 0]
guard.pending_keys.remove(&id);
}
if let Some(&image_at_head) = guard.image_queue.front() {
let state_at_head = *guard.image_states.get(&image_at_head).unwrap();
match state_at_head {
ImageState::PendingKey => {
// SetKey(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "PendingKey"
// /\ Len(keys) > 0
if !guard.keys.is_empty() {
// /\ keys' = Tail(keys)
guard.keys.pop_front();
// /\ keys_used' = keys_used + 1
guard.keys_used += 1;
// /\ image_states' = [image_states EXCEPT ![i] = "HasKey"]
guard.image_states.insert(image_at_head, ImageState::HasKey);
action_taken = true;
}
}
ImageState::HasKey => {
// FinishLoad(i)
// /\ Head(image_queue) = i
// /\ image_states[i] = "HasKey"
// /\ image_states' = [image_states EXCEPT ![i] = "Loaded"]
guard.image_states.insert(image_at_head, ImageState::Loaded);
action_taken = true;
}
ImageState::Loaded => {
// DequeImage(i)
// /\ Len(image_queue) > 0
// /\ Head(image_queue) = i
// /\ image_states[i] = "Loaded"
// /\ image_queue' = Tail(image_queue)
guard.image_queue.pop_front();
action_taken = true;
}
ImageState::None => {}
}
}
// \E i \in Image
// Note: To increase non-determinism, we check for StartLoad regardless of
// whether other actions have been taken. This is allowed by the spec,
// as Next is a disjunction of all actions.
for &image in images_clone.iter() {
// StartLoad(i)
// /\ image_states[i] = "None"
if *guard.image_states.get(&image).unwrap() == ImageState::None {
// /\ image_states' = [image_states EXCEPT ![i] = "PendingKey"]
guard.image_states.insert(image, ImageState::PendingKey);
// /\ image_queue' = Append(image_queue, i)
guard.image_queue.push_back(image);
action_taken = true;
// Note: Break after one to increase non-determinism.
break;
}
}
if action_taken {
cvar_clone.notify_all();
}
// ...existing code...
```