|
@testitem "Portfolio Optimization" begin
    using DecisionFocusedLearningBenchmarks

    # Smoke test for the portfolio-optimization benchmark: construct the
    # benchmark object and exercise its three generator entry points.
    # The full decision-focused training loop (FenchelYoungLoss + Flux)
    # was removed from this test; what remains must still assert something,
    # otherwise the testitem passes vacuously even if generation breaks.
    b = PortfolioOptimizationBenchmark()

    dataset = generate_dataset(b, 100)
    model = generate_statistical_model(b)
    maximizer = generate_maximizer(b)

    # generate_dataset(b, n) is expected to produce n samples
    # (downstream code slices it as dataset[1:50] / dataset[50:100]).
    @test length(dataset) == 100
end