diff --git a/Project.toml b/Project.toml index 85e6211..a95bb3e 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "DecisionFocusedLearningBenchmarks" uuid = "2fbe496a-299b-4c81-bab5-c44dfc55cf20" -authors = ["Members of JuliaDecisionFocusedLearning"] version = "0.4.0" +authors = ["Members of JuliaDecisionFocusedLearning"] [workspace] projects = ["docs", "test"] diff --git a/docs/src/api.md b/docs/src/api.md index 9f45d03..16d3658 100644 --- a/docs/src/api.md +++ b/docs/src/api.md @@ -72,6 +72,18 @@ Modules = [DecisionFocusedLearningBenchmarks.FixedSizeShortestPath] Public = false ``` +## Maintenance + +```@autodocs +Modules = [DecisionFocusedLearningBenchmarks.Maintenance] +Private = false +``` + +```@autodocs +Modules = [DecisionFocusedLearningBenchmarks.Maintenance] +Public = false +``` + ## Portfolio Optimization ```@autodocs diff --git a/docs/src/benchmarks/maintenance.md b/docs/src/benchmarks/maintenance.md new file mode 100644 index 0000000..236501c --- /dev/null +++ b/docs/src/benchmarks/maintenance.md @@ -0,0 +1,107 @@ +# Maintenance problem with resource constraint + +The Maintenance problem with resource constraint is a sequential decision-making benchmark where an agent must repeatedly decide which components to maintain over time. The goal is to minimize total expected cost while accounting for independent degradation of components and limited maintenance capacity. + + +## Problem Description + +### Overview + +In this benchmark, a system consists of $N$ identical components, each of which can degrade over $n$ discrete states. State $1$ means that the component is new, state $n$ means that the component is failed. At each time step, the agent can maintain up to $K$ components. + +This forms an endogenous multistage stochastic optimization problem, where the agent must plan maintenance actions over the horizon. 
+
+### Mathematical Formulation
+
+The maintenance problem can be formulated as a finite-horizon Markov Decision Process (MDP) with the following components:
+
+**State Space** $\mathcal{S}$: At time step $t$, the state $s_t \in [1:n]^N$ is the degradation state for each component.
+
+**Action Space** $\mathcal{A}$: The action at time $t$ is the set of components that are maintained at time $t$:
+```math
+a_t \subseteq \{1, 2, \ldots, N\} \text{ such that } |a_t| \leq K
+```
+### Transition Dynamics
+
+The state transitions depend on whether a component is maintained or not:
+
+For each component $i$ at time $t$:
+
+- **Maintained component** ($i \in a_t$):
+
+```math
+s_{t+1}^i = 1 \quad \text{(perfect maintenance)}
+```
+
+- **Unmaintained component** ($i \notin a_t$):
+
+```math
+s_{t+1}^i =
+\begin{cases}
+\min(s_t^i + 1, n) & \text{with probability } p,\\
+s_t^i & \text{with probability } 1-p.
+\end{cases}
+```
+
+Here, $p$ is the degradation probability, $s_t^i$ is the current state of component $i$, and $n$ is the maximum (failed) state.
+
+---
+
+### Cost Function
+
+The immediate cost at time $t$ is:
+
+```math
+c(s_t, a_t) = c_m \cdot |a_t| + c_f \cdot \#\{ i : s_t^i = n \}
+```
+
+Where:
+
+- $c_m$ is the maintenance cost per component.
+- $|a_t|$ is the number of components maintained.
+- $c_f$ is the failure cost per failed component.
+- $\#\{ i : s_t^i = n \}$ counts the number of components in the failed state.
+
+This formulation captures the total cost for maintaining components and penalizing failures.
+
+**Objective**: Find a policy $\pi: \mathcal{S} \to \mathcal{A}$ that minimizes the expected cumulative cost:
+```math
+\min_\pi \mathbb{E}\left[\sum_{t=1}^T c(s_t, \pi(s_t)) \right]
+```
+
+**Terminal Condition**: The episode terminates after $T$ time steps, with no terminal reward.
+
+## Key Components
+
+### [`MaintenanceBenchmark`](@ref)
+
+The main benchmark configuration with the following parameters:
+
+- `N`: number of components (default: 2)
+- `K`: maximum number of components that can be maintained simultaneously (default: 1)
+- `n`: number of degradation states per component (default: 3)
+- `p`: degradation probability (default: 0.2)
+- `c_f`: failure cost (default: 10.0)
+- `c_m`: maintenance cost (default: 3.0)
+- `max_steps`: number of time steps per episode (default: 80)
+
+### Instance Generation
+
+Each problem instance includes:
+
+- **Starting State**: a random starting degradation state in $[1, n]$ for each component.
+
+### Environment Dynamics
+
+The environment tracks:
+- the current time step
+- the current degradation state of each component
+
+**State Observation**: Agents observe the degradation state of each component.
+
+## Benchmark Policies
+
+### Greedy Policy
+
+Greedy policy that maintains components in the last two degradation states, up to the maintenance capacity. This provides a simple baseline.
+ diff --git a/docs/src/index.md b/docs/src/index.md index 0e81d5a..363abee 100644 --- a/docs/src/index.md +++ b/docs/src/index.md @@ -61,6 +61,7 @@ Single-stage optimization problems under uncertainty: Multi-stage sequential decision-making problems: - [`DynamicVehicleSchedulingBenchmark`](@ref): multi-stage vehicle scheduling under customer uncertainty - [`DynamicAssortmentBenchmark`](@ref): sequential product assortment selection with endogenous uncertainty +- [`MaintenanceBenchmark`](@ref): maintenance problem with resource constraint ## Getting Started diff --git a/src/DecisionFocusedLearningBenchmarks.jl b/src/DecisionFocusedLearningBenchmarks.jl index 4515ec1..2f1c320 100644 --- a/src/DecisionFocusedLearningBenchmarks.jl +++ b/src/DecisionFocusedLearningBenchmarks.jl @@ -57,6 +57,7 @@ include("PortfolioOptimization/PortfolioOptimization.jl") include("StochasticVehicleScheduling/StochasticVehicleScheduling.jl") include("DynamicVehicleScheduling/DynamicVehicleScheduling.jl") include("DynamicAssortment/DynamicAssortment.jl") +include("Maintenance/Maintenance.jl") using .Utils @@ -89,6 +90,7 @@ using .PortfolioOptimization using .StochasticVehicleScheduling using .DynamicVehicleScheduling using .DynamicAssortment +using .Maintenance export Argmax2DBenchmark export ArgmaxBenchmark @@ -100,5 +102,6 @@ export RankingBenchmark export StochasticVehicleSchedulingBenchmark export SubsetSelectionBenchmark export WarcraftBenchmark +export MaintenanceBenchmark end # module DecisionFocusedLearningBenchmarks diff --git a/src/DynamicAssortment/environment.jl b/src/DynamicAssortment/environment.jl index 603ca84..5236e2c 100644 --- a/src/DynamicAssortment/environment.jl +++ b/src/DynamicAssortment/environment.jl @@ -7,7 +7,7 @@ Environment for the dynamic assortment problem. 
$TYPEDFIELDS """ @kwdef mutable struct Environment{I<:Instance,R<:AbstractRNG,S<:Union{Nothing,Int}} <: - Utils.AbstractEnvironment + AbstractEnvironment "associated instance" instance::I "current step" @@ -197,16 +197,25 @@ Features observed by the agent at current step, as a concatenation of: - change in hype and saturation features from the starting state - normalized current step (divided by max steps and multiplied by 10) All features are normalized by dividing by 10. + +State +Return as a tuple: +- `env.features`: the current feature matrix (feature vector for all items). +- `env.purchase_history`: the purchase history over the most recent steps. """ function Utils.observe(env::Environment) delta_features = env.features[2:3, :] .- env.instance.starting_hype_and_saturation - return vcat( - env.features, - env.d_features, - delta_features, - ones(1, item_count(env)) .* (env.step / max_steps(env) * 10), - ) ./ 10, - nothing + features = + vcat( + env.features, + env.d_features, + delta_features, + ones(1, item_count(env)) .* (env.step / max_steps(env) * 10), + ) ./ 10 + + state = (copy(env.features), copy(env.purchase_history)) + + return features, state end """ diff --git a/src/Maintenance/Maintenance.jl b/src/Maintenance/Maintenance.jl new file mode 100644 index 0000000..5dc0580 --- /dev/null +++ b/src/Maintenance/Maintenance.jl @@ -0,0 +1,144 @@ +module Maintenance + +using ..Utils + +using DocStringExtensions: TYPEDEF, TYPEDFIELDS, TYPEDSIGNATURES, SIGNATURES +using Distributions: Uniform, Categorical +using Flux: Chain, Dense +using LinearAlgebra: dot +using Random: Random, AbstractRNG, MersenneTwister +using Statistics: mean + +using Combinatorics: combinations + +""" +$TYPEDEF + +Benchmark for a standard maintenance problem with resource constraints. +Components are identical and degrade independently over time. +A high cost is incurred for each component that reaches the final degradation level. +A cost is also incurred for maintaining a component. 
+The number of simultaneous maintenance operations is limited by a maintenance capacity constraint.
+
+# Fields
+$TYPEDFIELDS
+
+"""
+struct MaintenanceBenchmark <: AbstractDynamicBenchmark{true}
+    "number of components"
+    N::Int
+    "maximum number of components that can be maintained simultaneously"
+    K::Int
+    "number of degradation states per component"
+    n::Int
+    "degradation probability"
+    p::Float64
+    "failure cost"
+    c_f::Float64
+    "maintenance cost"
+    c_m::Float64
+    "number of steps per episode"
+    max_steps::Int
+
+    function MaintenanceBenchmark(N, K, n, p, c_f, c_m, max_steps)
+        @assert K <= N "number of maintained components $K > number of components $N"
+        @assert K >= 0 && N >= 0 "number of components should be positive"
+        @assert 0 <= p <= 1 "degradation probability $p is not in [0, 1]"
+        return new(N, K, n, p, c_f, c_m, max_steps)
+    end
+end
+
+"""
+    MaintenanceBenchmark(;
+        N=2,
+        K=1,
+        n=3,
+        p=0.2,
+        c_f=10.0,
+        c_m=3.0,
+        max_steps=80,
+    )
+
+Constructor for [`MaintenanceBenchmark`](@ref).
+By default, the benchmark has 2 components, maintenance capacity 1, number of degradation levels 3,
+degradation probability 0.2, failure cost 10.0, maintenance cost 3.0, 80 steps per episode, and is exogenous.
+"""
+function MaintenanceBenchmark(; N=2, K=1, n=3, p=0.2, c_f=10.0, c_m=3.0, max_steps=80)
+    return MaintenanceBenchmark(N, K, n, p, c_f, c_m, max_steps)
+end
+
+# Accessor functions
+component_count(b::MaintenanceBenchmark) = b.N
+maintenance_capacity(b::MaintenanceBenchmark) = b.K
+degradation_levels(b::MaintenanceBenchmark) = b.n
+degradation_probability(b::MaintenanceBenchmark) = b.p
+failure_cost(b::MaintenanceBenchmark) = b.c_f
+maintenance_cost(b::MaintenanceBenchmark) = b.c_m
+max_steps(b::MaintenanceBenchmark) = b.max_steps
+
+include("instance.jl")
+include("environment.jl")
+include("policies.jl")
+include("maximizer.jl")
+
+"""
+$TYPEDSIGNATURES
+
+Outputs a data sample containing an [`Instance`](@ref).
+"""
+function Utils.generate_sample(b::MaintenanceBenchmark, rng::AbstractRNG)
+    return DataSample(; instance=Instance(b, rng))
+end
+
+"""
+$TYPEDSIGNATURES
+
+Generates a statistical model for the maintenance benchmark.
+The model is a small neural network with one hidden layer and no activation function.
+"""
+function Utils.generate_statistical_model(b::MaintenanceBenchmark; seed=nothing)
+    Random.seed!(seed)
+    N = component_count(b)
+    return Chain(Dense(N => N), Dense(N => N), vec)
+end
+
+"""
+$TYPEDSIGNATURES
+
+Outputs a top k maximizer, with k being the maintenance capacity of the benchmark.
+"""
+function Utils.generate_maximizer(b::MaintenanceBenchmark)
+    return TopKPositiveMaximizer(maintenance_capacity(b))
+end
+
+"""
+$TYPEDSIGNATURES
+
+Creates an [`Environment`](@ref) from an [`Instance`](@ref) of the maintenance benchmark.
+The seed of the environment is randomly generated using the provided random number generator.
+"""
+function Utils.generate_environment(
+    ::MaintenanceBenchmark, instance::Instance, rng::AbstractRNG; kwargs...
+)
+    seed = rand(rng, 1:typemax(Int))
+    return Environment(instance; seed)
+end
+
+"""
+$TYPEDSIGNATURES
+
+Returns a single policy for the maintenance benchmark:
+- `Greedy`: maintains components when they are in the last state before failure, up to the maintenance capacity
+"""
+function Utils.generate_policies(::MaintenanceBenchmark)
+    greedy = Policy(
+        "Greedy",
+        "policy that maintains components when they are in the last state before failure, up to the maintenance capacity",
+        greedy_policy,
+    )
+    return (greedy,)
+end
+
+export MaintenanceBenchmark
+
+end
diff --git a/src/Maintenance/environment.jl b/src/Maintenance/environment.jl
new file mode 100644
index 0000000..6d29ec2
--- /dev/null
+++ b/src/Maintenance/environment.jl
@@ -0,0 +1,155 @@
+"""
+$TYPEDEF
+
+Environment for the maintenance problem.
+ +# Fields +$TYPEDFIELDS +""" +@kwdef mutable struct Environment{R<:AbstractRNG,S<:Union{Nothing,Int}} <: + AbstractEnvironment + "associated instance" + instance::Instance + "current step" + step::Int + "degradation state" + degradation_state::Vector{Int} + "rng" + rng::R + "seed for RNG" + seed::S +end + +""" +$TYPEDSIGNATURES + +Creates an [`Environment`](@ref) from an [`Instance`](@ref) of the maintenance benchmark. +""" +function Environment(instance::Instance; seed=0, rng::AbstractRNG=MersenneTwister(seed)) + degradation_state = copy(starting_state(instance)) + env = Environment(; instance, step=1, degradation_state, rng=rng, seed=seed) + Utils.reset!(env; reset_rng=true) + return env +end + +component_count(env::Environment) = component_count(env.instance) +maintenance_capacity(env::Environment) = maintenance_capacity(env.instance) +degradation_levels(env::Environment) = degradation_levels(env.instance) +degradation_probability(env::Environment) = degradation_probability(env.instance) +failure_cost(env::Environment) = failure_cost(env.instance) +maintenance_cost(env::Environment) = maintenance_cost(env.instance) +max_steps(env::Environment) = max_steps(env.instance) +starting_state(env::Environment) = starting_state(env.instance) + +""" +$TYPEDSIGNATURES +Draw random degradations for all components. +""" +function degrad!(env::Environment) + N = component_count(env) + n = degradation_levels(env) + p = degradation_probability(env) + rng = env.rng + + for i in 1:N + if env.degradation_state[i] < n && rand(rng) < p + env.degradation_state[i] += 1 + end + end + + return env.degradation_state +end + +""" +$TYPEDSIGNATURES +Maintain components. +""" +function maintain!(env::Environment, maintenance::BitVector) + N = component_count(env) + + for i in 1:N + if maintenance[i] + env.degradation_state[i] = 1 + end + end + + return env.degradation_state +end + +""" +$TYPEDSIGNATURES + +Compute maintenance cost. 
+""" +function maintenance_cost(env::Environment, maintenance::BitVector) + return maintenance_cost(env) * sum(maintenance) +end + +""" +$TYPEDSIGNATURES + +Compute degradation cost. +""" +function degradation_cost(env::Environment) + n = degradation_levels(env) + return failure_cost(env) * count(==(n), env.degradation_state) +end + +""" +$TYPEDSIGNATURES + +Outputs the seed of the environment. +""" +Utils.get_seed(env::Environment) = env.seed + +""" +$TYPEDSIGNATURES + +Resets the environment to the initial state: +- reset the rng if `reset_rng` is true +- reset the step to 1 +- reset the degradation state to the starting state +""" +function Utils.reset!(env::Environment; reset_rng=false, seed=env.seed) + reset_rng && Random.seed!(env.rng, seed) + env.step = 1 + env.degradation_state .= starting_state(env) + return nothing +end + +""" +$TYPEDSIGNATURES + +Checks if the environment has reached the maximum number of steps. +""" +function Utils.is_terminated(env::Environment) + return env.step > max_steps(env) +end + +""" +$TYPEDSIGNATURES + +Returns features, state tuple. +The features observed by the agent at current step are the degradation states of all components. +It is also the internal state, so we return the same thing twice. + +""" +function Utils.observe(env::Environment) + state = env.degradation_state + return state, state +end + +""" +$TYPEDSIGNATURES + +Performs one step in the environment given a maintenance. +Draw random degradations for components that are not maintained. +""" +function Utils.step!(env::Environment, maintenance::BitVector) + @assert !Utils.is_terminated(env) "Environment is terminated, cannot act!" 
+ cost = maintenance_cost(env, maintenance) + degradation_cost(env) + degrad!(env) + maintain!(env, maintenance) + env.step += 1 + return cost +end diff --git a/src/Maintenance/instance.jl b/src/Maintenance/instance.jl new file mode 100644 index 0000000..4a2f4fb --- /dev/null +++ b/src/Maintenance/instance.jl @@ -0,0 +1,36 @@ +""" +$TYPEDEF + +Instance of the maintenance problem. + +# Fields +$TYPEDFIELDS +""" +@kwdef struct Instance{MaintenanceBenchmark} + "associated benchmark" + config::MaintenanceBenchmark + "starting degradation states" + starting_state::Vector{Int} +end + +""" +$TYPEDSIGNATURES + +Generates an instance with random starting degradation states uniformly in [1, n] +""" +function Instance(b::MaintenanceBenchmark, rng::AbstractRNG) + N = component_count(b) + n = degradation_levels(b) + starting_state = rand(rng, 1:n, N) + return Instance(; config=b, starting_state=starting_state) +end + +# Accessor functions +component_count(b::Instance) = component_count(b.config) +maintenance_capacity(b::Instance) = maintenance_capacity(b.config) +degradation_levels(b::Instance) = degradation_levels(b.config) +degradation_probability(b::Instance) = degradation_probability(b.config) +failure_cost(b::Instance) = failure_cost(b.config) +maintenance_cost(b::Instance) = maintenance_cost(b.config) +max_steps(b::Instance) = max_steps(b.config) +starting_state(b::Instance) = b.starting_state diff --git a/src/Maintenance/maximizer.jl b/src/Maintenance/maximizer.jl new file mode 100644 index 0000000..1e7a653 --- /dev/null +++ b/src/Maintenance/maximizer.jl @@ -0,0 +1,32 @@ +""" +$TYPEDEF + +Top k maximizer. +""" +struct TopKPositiveMaximizer + k::Int +end + +""" +$TYPEDSIGNATURES + +Return the top k indices of `θ`. +""" +function (m::TopKPositiveMaximizer)(θ; kwargs...) 
+ N = length(θ) + + positive_indices = findall(x -> x > 0, θ) + nb_positive = length(positive_indices) + res = falses(N) + + if nb_positive == 0 + return res + elseif nb_positive <= m.k + res[positive_indices] .= true + return res + else + idx = partialsortperm(θ[positive_indices], 1:(m.k); rev=true) + res[positive_indices[idx]] .= true + return res + end +end diff --git a/src/Maintenance/policies.jl b/src/Maintenance/policies.jl new file mode 100644 index 0000000..89de852 --- /dev/null +++ b/src/Maintenance/policies.jl @@ -0,0 +1,26 @@ + +""" +$TYPEDSIGNATURES + +Greedy policy that maintains components when they are in the last state before failure, up to the maintenance capacity. +""" +function greedy_policy(env::Environment) + state = env.degradation_state + N = component_count(env) + K = maintenance_capacity(env) + res = falses(N) + n = degradation_levels(env) + + idx_max = findall(==(n), state) + take = first(idx_max, min(K, length(idx_max))) + res[take] .= true + remaining = K - length(take) + + if remaining > 0 + idx_second = findall(==(n - 1), state) + take2 = first(idx_second, min(remaining, length(idx_second))) + res[take2] .= true + end + + return res +end diff --git a/test/dynamic_assortment.jl b/test/dynamic_assortment.jl index a3feddb..97674bf 100644 --- a/test/dynamic_assortment.jl +++ b/test/dynamic_assortment.jl @@ -246,22 +246,28 @@ end instance = DAP.Instance(b, MersenneTwister(42)) env = DAP.Environment(instance; seed=123) - obs, info = observe(env) + features, state = observe(env) # Check observation dimensions: (d + 8, N) # Features: prices(1) + hype_sat(2) + static(d) + d_features(2) + delta_features(2) + step(1) expected_rows = 2 + 8 # d + 8 where d=2 - @test size(obs) == (expected_rows, 3) - @test info === nothing + @test size(features) == (expected_rows, 3) - @test all(-1.0 ≤ x ≤ 1.0 for x in obs) + v, purchase_history = state + @test size(v) == (5, 3) + + @test all(-1.0 ≤ x ≤ 1.0 for x in features) # Test observation changes with step - 
obs1, _ = observe(env) + features1, state1 = observe(env) + v, purchase_history1 = state1 + DAP.buy_item!(env, 1) - obs2, _ = observe(env) + features2, state2 = observe(env) + _, purchase_history2 = state2 - @test obs1 != obs2 # Observations should differ after purchase + @test purchase_history1 != purchase_history2 # Observations should differ after purchase + @test features1 != features2 end @testset "DynamicAssortment - Policies" begin diff --git a/test/maintenance.jl b/test/maintenance.jl new file mode 100644 index 0000000..d7183f5 --- /dev/null +++ b/test/maintenance.jl @@ -0,0 +1,229 @@ +const maintenance = DecisionFocusedLearningBenchmarks.Maintenance + +@testset "Maintenance - Benchmark Construction" begin + # Test default constructor + b = MaintenanceBenchmark() + @test b.N == 2 + @test b.K == 1 + @test b.n == 3 + @test b.p == 0.2 + @test b.c_f == 10.0 + @test b.c_m == 3.0 + @test b.max_steps == 80 + @test is_exogenous(b) + @test !is_endogenous(b) + + # Test custom constructor + b_custom = MaintenanceBenchmark(; N=10, K=3, n=5, p=0.3, c_f=5.0, c_m=3.0, max_steps=50) + @test b_custom.N == 10 + @test b_custom.K == 3 + @test b_custom.n == 5 + @test b_custom.p == 0.3 + @test b_custom.c_f == 5.0 + @test b_custom.c_m == 3.0 + @test b_custom.max_steps == 50 + + # Test accessor functions + @test maintenance.component_count(b) == 2 + @test maintenance.maintenance_capacity(b) == 1 + @test maintenance.degradation_levels(b) == 3 + @test maintenance.degradation_probability(b) == 0.2 + @test maintenance.failure_cost(b) == 10.0 + @test maintenance.maintenance_cost(b) == 3.0 + @test maintenance.max_steps(b) == 80 +end + +@testset "Maintenance - Instance Generation" begin + b = MaintenanceBenchmark(; N=10, K=3, n=5, p=0.3, c_f=5.0, c_m=3.0, max_steps=50) + rng = MersenneTwister(42) + + instance = maintenance.Instance(b, rng) + + # test state is randomly initialized + state1 = maintenance.starting_state(instance) + rng2 = MersenneTwister(43) + instance2 = 
maintenance.Instance(b, rng2) + state2 = maintenance.starting_state(instance2) + @test state1 != state2 + + # Test instance structure + @test length(instance.starting_state) == 10 + @test all(1.0 ≤ s ≤ 5 for s in instance.starting_state) + + # Test accessor functions + @test maintenance.component_count(instance) == 10 + @test maintenance.maintenance_capacity(instance) == 3 + @test maintenance.degradation_levels(instance) == 5 + @test maintenance.degradation_probability(instance) == 0.3 + @test maintenance.failure_cost(instance) == 5.0 + @test maintenance.maintenance_cost(instance) == 3.0 + @test maintenance.max_steps(instance) == 50 +end + +@testset "Maintenance - Environment Initialization" begin + b = MaintenanceBenchmark() + instance = maintenance.Instance(b, MersenneTwister(42)) + + env = maintenance.Environment(instance; seed=123) + + # Test initial state + @test env.step == 1 + @test env.seed == 123 + @test !is_terminated(env) + + # Test accessor functions + @test maintenance.component_count(env) == 2 + @test maintenance.maintenance_capacity(env) == 1 + @test maintenance.degradation_levels(env) == 3 + @test maintenance.degradation_probability(env) == 0.2 + @test maintenance.failure_cost(env) == 10.0 + @test maintenance.maintenance_cost(env) == 3.0 + @test maintenance.max_steps(env) == 80 +end + +@testset "Maintenance - Environment Reset" begin + b = MaintenanceBenchmark() + instance = maintenance.Instance(b, MersenneTwister(42)) + env = maintenance.Environment(instance; seed=123) + + # Modify environment state + env.step = 3 + + # Reset environment + reset!(env) + + # Check reset state + @test env.step == 1 +end + +@testset "Maintenance - Cost" begin + b = MaintenanceBenchmark() + instance = maintenance.Instance(b, MersenneTwister(42)) + env = maintenance.Environment(instance; seed=123) + + env.degradation_state = [1, 1] + @test maintenance.maintenance_cost(env, BitVector([false, false])) == 0.0 + @test maintenance.maintenance_cost(env, BitVector([false, 
true])) == 3.0 + @test maintenance.maintenance_cost(env, BitVector([true, true])) == 6.0 + + @test maintenance.degradation_cost(env) == 0.0 + env.degradation_state = [2, 2] + @test maintenance.degradation_cost(env) == 0.0 + env.degradation_state = [3, 2] + @test maintenance.degradation_cost(env) == 10.0 + env.degradation_state = [3, 3] + @test maintenance.degradation_cost(env) == 20.0 +end + +@testset "Maintenance - Environment Step" begin + b = MaintenanceBenchmark() + instance = maintenance.Instance(b, MersenneTwister(42)) + env = maintenance.Environment(instance; seed=123) + + maintenance_vect = BitVector([false, false]) + + initial_step = env.step + # Take a step + reward = step!(env, maintenance_vect) + + # Check step progression + @test env.step == initial_step + 1 + @test reward ≥ 0.0 # Reward should be non-negative + + # Test termination + for _ in 1:(maintenance.max_steps(env) - 1) + if !is_terminated(env) + step!(env, maintenance_vect) + end + end + @test is_terminated(env) + + # Test error on terminated environment + @test_throws AssertionError step!(env, maintenance_vect) +end + +@testset "Maintenance - Observation" begin + b = MaintenanceBenchmark() + instance = maintenance.Instance(b, MersenneTwister(42)) + env = maintenance.Environment(instance; seed=123) + env.degradation_state = [1, 1] + + state, features = observe(env) + + @test state == [1, 1] + @test features === state + + env.degradation_state = [2, 3] + state2, _ = observe(env) + + @test state != state2 # Observations should differ after purchase +end + +@testset "Maintenance - Policies" begin + using Statistics: mean + + b = MaintenanceBenchmark() + + # Generate test data + dataset = generate_dataset(b, 10; seed=0) + environments = generate_environments(b, dataset) + + # Get policies + policies = generate_policies(b) + greedy = policies[1] + + @test greedy.name == "Greedy" + + # Test policy evaluation + r_greedy, _ = evaluate_policy!(greedy, environments, 10) + + @test length(r_greedy) == 
length(environments) + @test all(r_greedy .≥ 0.0) + + # Test policy output format + env = environments[1] + reset!(env) + + greedy_action = greedy(env) + @test greedy_action isa BitVector && length(greedy_action) == 2 +end + +@testset "Maintenance - Model and Maximizer Integration" begin + b = MaintenanceBenchmark() + + # Test statistical model generation + model = generate_statistical_model(b; seed=42) + # Test maximizer generation + maximizer = generate_maximizer(b) + + # Test integration with sample data + sample = generate_sample(b, MersenneTwister(42)) + @test hasfield(typeof(sample), :info) + + dataset = generate_dataset(b, 3; seed=42) + environments = generate_environments(b, dataset) + + # Evaluate policy to get data samples + policies = generate_policies(b) + _, data_samples = evaluate_policy!(policies[1], environments) + + # Test model-maximizer pipeline + sample = data_samples[1] + x = sample.x + θ = model(x) + y = maximizer(θ) + + @test length(θ) == 2 + + θ = [1, 2] + @test maximizer(θ) == BitVector([false, true]) + + b = MaintenanceBenchmark(; N=10, K=3, n=5, p=0.3, c_f=5.0, c_m=3.0, max_steps=50) + θ = [i for i in 1:10] + maximizer = generate_maximizer(b) + @test maximizer(θ) == + BitVector([false, false, false, false, false, false, false, true, true, true]) + + θ = fill(-1.0, 10) + @test maximizer(θ) == falses(10) +end diff --git a/test/runtests.jl b/test/runtests.jl index 914a6e4..00bea25 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -14,6 +14,7 @@ using Random include("ranking.jl") include("subset_selection.jl") include("fixed_size_shortest_path.jl") + include("maintenance.jl") include("warcraft.jl") include("vsp.jl") include("portfolio_optimization.jl")