/// Sample showing how to model and solve a capacitated vehicle routing
@@ -271,8 +272,7 @@ private void Solve(int number_of_orders, int number_of_vehicles)
}
// Solving
- RoutingSearchParameters search_parameters =
- operations_research_constraint_solver.DefaultRoutingSearchParameters();
+ RoutingSearchParameters search_parameters = RoutingGlobals.DefaultRoutingSearchParameters();
search_parameters.FirstSolutionStrategy = FirstSolutionStrategy.Types.Value.AllUnperformed;
Console.WriteLine("Search...");
diff --git a/examples/dotnet/cstsp.cs b/examples/dotnet/cstsp.cs
index e5eb55daab0..f42a3ecf4a1 100644
--- a/examples/dotnet/cstsp.cs
+++ b/examples/dotnet/cstsp.cs
@@ -14,6 +14,7 @@
using System;
using System.Collections.Generic;
using Google.OrTools.ConstraintSolver;
+using Google.OrTools.Routing;
class Tsp
{
@@ -77,8 +78,7 @@ static void Solve(int size, int forbidden, int seed)
size + 1, size + 1, true, "dummy");
// Solve, returns a solution if any (owned by RoutingModel).
- RoutingSearchParameters search_parameters =
- operations_research_constraint_solver.DefaultRoutingSearchParameters();
+ RoutingSearchParameters search_parameters = RoutingGlobals.DefaultRoutingSearchParameters();
// Setting first solution heuristic (cheapest addition).
search_parameters.FirstSolutionStrategy = FirstSolutionStrategy.Types.Value.PathCheapestArc;
diff --git a/examples/java/CapacitatedVehicleRoutingProblemWithTimeWindows.java b/examples/java/CapacitatedVehicleRoutingProblemWithTimeWindows.java
index 7f3dae049c4..0344fb8c73b 100644
--- a/examples/java/CapacitatedVehicleRoutingProblemWithTimeWindows.java
+++ b/examples/java/CapacitatedVehicleRoutingProblemWithTimeWindows.java
@@ -15,14 +15,14 @@
import com.google.ortools.Loader;
import com.google.ortools.constraintsolver.Assignment;
-import com.google.ortools.constraintsolver.FirstSolutionStrategy;
import com.google.ortools.constraintsolver.IntVar;
-import com.google.ortools.constraintsolver.RoutingDimension;
-import com.google.ortools.constraintsolver.RoutingIndexManager;
-import com.google.ortools.constraintsolver.RoutingModel;
-import com.google.ortools.constraintsolver.RoutingSearchParameters;
-import com.google.ortools.constraintsolver.RoutingSearchStatus;
-import com.google.ortools.constraintsolver.main;
+import com.google.ortools.routing.FirstSolutionStrategy;
+import com.google.ortools.routing.Globals;
+import com.google.ortools.routing.RoutingDimension;
+import com.google.ortools.routing.RoutingIndexManager;
+import com.google.ortools.routing.RoutingModel;
+import com.google.ortools.routing.RoutingSearchParameters;
+import com.google.ortools.routing.RoutingSearchStatus;
import java.util.ArrayList;
import java.util.List;
import java.util.Random;
@@ -281,7 +281,7 @@ public long applyAsLong(long fromIndex) {
// Solving
RoutingSearchParameters parameters =
- main.defaultRoutingSearchParameters()
+ Globals.defaultRoutingSearchParameters()
.toBuilder()
.setFirstSolutionStrategy(FirstSolutionStrategy.Value.ALL_UNPERFORMED)
.build();
diff --git a/examples/java/RandomTsp.java b/examples/java/RandomTsp.java
index 29bf624e3c7..e7c1e279135 100644
--- a/examples/java/RandomTsp.java
+++ b/examples/java/RandomTsp.java
@@ -16,11 +16,11 @@
import com.google.ortools.Loader;
import com.google.ortools.constraintsolver.Assignment;
-import com.google.ortools.constraintsolver.FirstSolutionStrategy;
-import com.google.ortools.constraintsolver.RoutingIndexManager;
-import com.google.ortools.constraintsolver.RoutingModel;
-import com.google.ortools.constraintsolver.RoutingSearchParameters;
-import com.google.ortools.constraintsolver.main;
+import com.google.ortools.routing.FirstSolutionStrategy;
+import com.google.ortools.routing.Globals;
+import com.google.ortools.routing.RoutingIndexManager;
+import com.google.ortools.routing.RoutingModel;
+import com.google.ortools.routing.RoutingSearchParameters;
// import java.io.*;
// import java.text.*;
// import java.util.*;
@@ -92,7 +92,7 @@ static void solve(int size, int forbidden, int seed) {
// Solve, returns a solution if any (owned by RoutingModel).
RoutingSearchParameters search_parameters =
RoutingSearchParameters.newBuilder()
- .mergeFrom(main.defaultRoutingSearchParameters())
+ .mergeFrom(Globals.defaultRoutingSearchParameters())
.setFirstSolutionStrategy(FirstSolutionStrategy.Value.PATH_CHEAPEST_ARC)
.build();
diff --git a/examples/notebook/examples/cvrptw_plot.ipynb b/examples/notebook/examples/cvrptw_plot.ipynb
index 5f7928f82fd..fff0ae13400 100644
--- a/examples/notebook/examples/cvrptw_plot.ipynb
+++ b/examples/notebook/examples/cvrptw_plot.ipynb
@@ -108,7 +108,7 @@
"from matplotlib import pyplot as plt\n",
"from collections import namedtuple\n",
"from ortools.constraint_solver import pywrapcp\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
+ "from ortools.routing import enums_pb2\n",
"from datetime import datetime, timedelta\n",
"\n",
"\n",
@@ -693,7 +693,7 @@
" parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
" # Setting first solution heuristic (cheapest addition).\n",
" parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
" # Routing: forbids use of TSPOpt neighborhood, (this is the default behaviour)\n",
" parameters.local_search_operators.use_tsp_opt = pywrapcp.BOOL_FALSE\n",
" # Disabling Large Neighborhood Search, (this is the default behaviour)\n",
diff --git a/examples/notebook/examples/prize_collecting_tsp.ipynb b/examples/notebook/examples/prize_collecting_tsp.ipynb
index 1f673e1e5b1..2a7956340e6 100644
--- a/examples/notebook/examples/prize_collecting_tsp.ipynb
+++ b/examples/notebook/examples/prize_collecting_tsp.ipynb
@@ -82,8 +82,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"DISTANCE_MATRIX = [\n",
" [0, 10938, 4542, 2835, 29441, 2171, 1611, 9208, 9528, 11111, 16120, 22606, 22127, 20627, 21246, 23387, 16697, 33609, 26184, 24772, 22644, 20655, 30492, 23296, 32979, 18141, 19248, 17129, 17192, 15645, 12658, 11210, 12094, 13175, 18162, 4968, 12308, 10084, 13026, 15056],\n",
@@ -175,13 +175,13 @@
" all_nodes = range(num_nodes)\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" num_nodes,\n",
" num_vehicles,\n",
" depot)\n",
"\n",
" # Create routing model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -214,11 +214,11 @@
" VISIT_VALUES[node])\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n",
" search_parameters.time_limit.FromSeconds(15)\n",
" #search_parameters.log_search = True\n",
"\n",
diff --git a/examples/notebook/examples/prize_collecting_vrp.ipynb b/examples/notebook/examples/prize_collecting_vrp.ipynb
index 1d0da49a882..2fb39f49093 100644
--- a/examples/notebook/examples/prize_collecting_vrp.ipynb
+++ b/examples/notebook/examples/prize_collecting_vrp.ipynb
@@ -82,8 +82,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"DISTANCE_MATRIX = [\n",
" [0, 10938, 4542, 2835, 29441, 2171, 1611, 9208, 9528, 11111, 16120, 22606, 22127, 20627, 21246, 23387, 16697, 33609, 26184, 24772, 22644, 20655, 30492, 23296, 32979, 18141, 19248, 17129, 17192, 15645, 12658, 11210, 12094, 13175, 18162, 4968, 12308, 10084, 13026, 15056],\n",
@@ -151,6 +151,8 @@
" total_distance = 0\n",
" total_value_collected = 0\n",
" for v in range(manager.GetNumberOfVehicles()):\n",
+ " if not routing.IsVehicleUsed(assignment, v):\n",
+ " continue\n",
" index = routing.Start(v)\n",
" plan_output = f'Route for vehicle {v}:\\n'\n",
" route_distance = 0\n",
@@ -181,13 +183,13 @@
" all_nodes = range(num_nodes)\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" num_nodes,\n",
" num_vehicles,\n",
" depot)\n",
"\n",
" # Create routing model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -220,11 +222,11 @@
" VISIT_VALUES[node])\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)\n",
" search_parameters.time_limit.FromSeconds(15)\n",
" #search_parameters.log_search = True\n",
"\n",
diff --git a/examples/notebook/examples/random_tsp.ipynb b/examples/notebook/examples/random_tsp.ipynb
index b07e34c39cf..62e29e9e189 100644
--- a/examples/notebook/examples/random_tsp.ipynb
+++ b/examples/notebook/examples/random_tsp.ipynb
@@ -96,8 +96,8 @@
"from functools import partial\n",
"import random\n",
"\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"parser = argparse.ArgumentParser()\n",
"\n",
@@ -161,12 +161,12 @@
" # Second argument = 1 to build a single tour (it's a TSP).\n",
" # Nodes are indexed from 0 to args_tsp_size - 1, by default the start of\n",
" # the route is node 0.\n",
- " manager = pywrapcp.RoutingIndexManager(args.tsp_size, 1, 0)\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " manager = pywraprouting.RoutingIndexManager(args.tsp_size, 1, 0)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" # Setting first solution heuristic (cheapest addition).\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)\n",
"\n",
" # Setting the cost function.\n",
" # Put a callback to the distance accessor here. The callback takes two\n",
diff --git a/examples/notebook/examples/transit_time.ipynb b/examples/notebook/examples/transit_time.ipynb
index 8f75f9028ef..f8fa14e7939 100644
--- a/examples/notebook/examples/transit_time.ipynb
+++ b/examples/notebook/examples/transit_time.ipynb
@@ -89,7 +89,6 @@
"outputs": [],
"source": [
"from ortools.constraint_solver import pywrapcp\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
"\n",
"\n",
"###########################\n",
diff --git a/examples/notebook/constraint_solver/cvrp_reload.ipynb b/examples/notebook/routing/cvrp_reload.ipynb
similarity index 96%
rename from examples/notebook/constraint_solver/cvrp_reload.ipynb
rename to examples/notebook/routing/cvrp_reload.ipynb
index 2e70d31c2e0..3d999c4ce1c 100644
--- a/examples/notebook/constraint_solver/cvrp_reload.ipynb
+++ b/examples/notebook/routing/cvrp_reload.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -111,8 +111,8 @@
"source": [
"from functools import partial\n",
"\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"###########################\n",
@@ -446,12 +446,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" data[\"num_locations\"], data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Define weight of each edge\n",
" distance_evaluator_index = routing.RegisterTransitCallback(\n",
@@ -475,12 +475,12 @@
" add_time_window_constraints(routing, manager, data, time_evaluator_index)\n",
"\n",
" # Setting first solution heuristic (cheapest addition).\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" ) # pylint: disable=no-member\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(3)\n",
"\n",
diff --git a/examples/notebook/routing/cvrptw.ipynb b/examples/notebook/routing/cvrptw.ipynb
new file mode 100644
index 00000000000..421bd3000dd
--- /dev/null
+++ b/examples/notebook/routing/cvrptw.ipynb
@@ -0,0 +1,340 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "google",
+ "metadata": {},
+ "source": [
+ "##### Copyright 2025 Google LLC."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "apache",
+ "metadata": {},
+ "source": [
+ "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "you may not use this file except in compliance with the License.\n",
+ "You may obtain a copy of the License at\n",
+ "\n",
+ " http://www.apache.org/licenses/LICENSE-2.0\n",
+ "\n",
+ "Unless required by applicable law or agreed to in writing, software\n",
+ "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "See the License for the specific language governing permissions and\n",
+ "limitations under the License.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "basename",
+ "metadata": {},
+ "source": [
+ "# cvrptw"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "link",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "doc",
+ "metadata": {},
+ "source": [
+ "First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "install",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install ortools"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "description",
+ "metadata": {},
+ "source": [
+ "\n",
+ "Capacited Vehicles Routing Problem with Time Windows (CVRPTW).\n",
+ "\n",
+ "This is a sample using the routing library python wrapper to solve a VRP\n",
+ "problem.\n",
+ "A description of the problem can be found here:\n",
+ "http://en.wikipedia.org/wiki/Vehicle_routing_problem.\n",
+ "\n",
+ "Distances are in meters.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
+ "\n",
+ "FirstSolutionStrategy = enums_pb2.FirstSolutionStrategy\n",
+ "LocalSearchMetaheuristic = enums_pb2.LocalSearchMetaheuristic\n",
+ "RoutingSearchStatus = enums_pb2.RoutingSearchStatus\n",
+ "\n",
+ "\n",
+ "def create_data_model():\n",
+ " \"\"\"Stores the data for the problem.\"\"\"\n",
+ " data = {}\n",
+ " data[\"distance_matrix\"] = [\n",
+ " # fmt: off\n",
+ " [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],\n",
+ " [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],\n",
+ " [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],\n",
+ " [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],\n",
+ " [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],\n",
+ " [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],\n",
+ " [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],\n",
+ " [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],\n",
+ " [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],\n",
+ " [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],\n",
+ " [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],\n",
+ " [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],\n",
+ " [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],\n",
+ " [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],\n",
+ " [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],\n",
+ " [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],\n",
+ " [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],\n",
+ " # fmt: on\n",
+ " ]\n",
+ " data[\"time_matrix\"] = [\n",
+ " [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],\n",
+ " [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],\n",
+ " [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],\n",
+ " [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],\n",
+ " [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],\n",
+ " [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],\n",
+ " [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],\n",
+ " [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],\n",
+ " [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],\n",
+ " [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],\n",
+ " [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],\n",
+ " [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],\n",
+ " [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],\n",
+ " [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],\n",
+ " [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],\n",
+ " [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],\n",
+ " [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],\n",
+ " ]\n",
+ " data[\"time_windows\"] = [\n",
+ " (0, 30), # depot\n",
+ " (7, 12), # 1\n",
+ " (10, 15), # 2\n",
+ " (16, 18), # 3\n",
+ " (10, 13), # 4\n",
+ " (0, 5), # 5\n",
+ " (5, 10), # 6\n",
+ " (0, 4), # 7\n",
+ " (5, 10), # 8\n",
+ " (0, 3), # 9\n",
+ " (10, 16), # 10\n",
+ " (10, 15), # 11\n",
+ " (0, 5), # 12\n",
+ " (5, 10), # 13\n",
+ " (7, 8), # 14\n",
+ " (10, 15), # 15\n",
+ " (11, 15), # 16\n",
+ " ]\n",
+ " assert len(data[\"distance_matrix\"]) == len(data[\"time_matrix\"])\n",
+ " assert len(data[\"time_matrix\"]) == len(data[\"time_windows\"])\n",
+ " data[\"demands\"] = [0, 1, 1, 2, 4, 2, 4, 8, 8, 1, 2, 1, 2, 4, 4, 8, 8]\n",
+ " assert len(data[\"distance_matrix\"]) == len(data[\"demands\"])\n",
+ " data[\"vehicle_capacities\"] = [15, 15, 15, 15]\n",
+ " data[\"num_vehicles\"] = len(data[\"vehicle_capacities\"])\n",
+ " data[\"depot\"] = 0\n",
+ " return data\n",
+ "\n",
+ "\n",
+ "def print_solution(manager, routing, solution):\n",
+ " \"\"\"Prints solution on console.\"\"\"\n",
+ " status = routing.status()\n",
+ " print(f\"Status: {RoutingSearchStatus.Value.Name(status)}\")\n",
+ " if (\n",
+ " status != RoutingSearchStatus.ROUTING_OPTIMAL\n",
+ " and status != RoutingSearchStatus.ROUTING_SUCCESS\n",
+ " ):\n",
+ " print(\"No solution found!\")\n",
+ " return\n",
+ " print(f\"Objective: {solution.ObjectiveValue()}\")\n",
+ " time_dimension = routing.GetDimensionOrDie(\"Time\")\n",
+ " capacity_dimension = routing.GetDimensionOrDie(\"Capacity\")\n",
+ " total_distance = 0\n",
+ " total_time = 0\n",
+ " total_load = 0\n",
+ " for vehicle_id in range(manager.GetNumberOfVehicles()):\n",
+ " if not routing.IsVehicleUsed(solution, vehicle_id):\n",
+ " continue\n",
+ " index = routing.Start(vehicle_id)\n",
+ " plan_output = f\"Route for vehicle {vehicle_id}:\\n\"\n",
+ " route_distance = 0\n",
+ " while not routing.IsEnd(index):\n",
+ " time_var = time_dimension.CumulVar(index)\n",
+ " capacity_var = capacity_dimension.CumulVar(index)\n",
+ " plan_output += (\n",
+ " f\"Node_{manager.IndexToNode(index)}\"\n",
+ " f\" TW:[{time_var.Min()},{time_var.Max()}]\"\n",
+ " f\" Time({solution.Min(time_var)},{solution.Max(time_var)})\"\n",
+ " f\" Load({solution.Value(capacity_var)}/{capacity_var.Max()})\"\n",
+ " \" -> \"\n",
+ " )\n",
+ " previous_index = index\n",
+ " index = solution.Value(routing.NextVar(index))\n",
+ " route_distance += routing.GetArcCostForVehicle(\n",
+ " previous_index, index, vehicle_id\n",
+ " )\n",
+ " time_var = time_dimension.CumulVar(index)\n",
+ " capacity_var = capacity_dimension.CumulVar(index)\n",
+ " plan_output += (\n",
+ " f\"Node_{manager.IndexToNode(index)}\"\n",
+ " f\" Time({solution.Min(time_var)},{solution.Max(time_var)})\"\n",
+ " f\" Load({solution.Value(capacity_var)}/{capacity_var.Max()})\"\n",
+ " \"\\n\"\n",
+ " )\n",
+ " plan_output += f\"Distance of the route: {route_distance}m\\n\"\n",
+ " plan_output += f\"Time of the route: {solution.Min(time_var)}min\\n\"\n",
+ " plan_output += f\"Load of the route: {solution.Value(capacity_var)}\\n\"\n",
+ " print(plan_output)\n",
+ " total_distance += route_distance\n",
+ " total_time += solution.Min(time_var)\n",
+ " total_load += solution.Value(capacity_var)\n",
+ " print(f\"Total distance of all routes: {total_distance}m\")\n",
+ " print(f\"Total time of all routes: {total_time}min\")\n",
+ " print(f\"Total load of all routes: {total_load}\")\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ " \"\"\"Entry point of the program.\"\"\"\n",
+ " # Instantiate the data problem.\n",
+ " data = create_data_model()\n",
+ "\n",
+ " # Create the routing index manager.\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
+ " len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
+ " )\n",
+ "\n",
+ " # Create Routing Model.\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
+ "\n",
+ " # Create and register a distance transit callback.\n",
+ " def distance_callback(from_index, to_index):\n",
+ " \"\"\"Returns the distance between the two nodes.\"\"\"\n",
+ " # Convert from routing variable Index to distance matrix NodeIndex.\n",
+ " from_node = manager.IndexToNode(from_index)\n",
+ " to_node = manager.IndexToNode(to_index)\n",
+ " return data[\"distance_matrix\"][from_node][to_node]\n",
+ "\n",
+ " distance_callback_index = routing.RegisterTransitCallback(distance_callback)\n",
+ "\n",
+ " # Define cost of each arc.\n",
+ " routing.SetArcCostEvaluatorOfAllVehicles(distance_callback_index)\n",
+ "\n",
+ " # Add Time Windows constraint.\n",
+ " def time_callback(from_index, to_index):\n",
+ " \"\"\"Returns the travel time between the two nodes.\"\"\"\n",
+ " # Convert from routing variable Index to time matrix NodeIndex.\n",
+ " from_node = manager.IndexToNode(from_index)\n",
+ " to_node = manager.IndexToNode(to_index)\n",
+ " return data[\"time_matrix\"][from_node][to_node]\n",
+ "\n",
+ " time_callback_index = routing.RegisterTransitCallback(time_callback)\n",
+ " routing.AddDimension(\n",
+ " time_callback_index,\n",
+ " 30, # allow waiting time\n",
+ " 30, # maximum time per vehicle\n",
+ " False, # Don't force start cumul to zero.\n",
+ " \"Time\",\n",
+ " )\n",
+ " time_dimension = routing.GetDimensionOrDie(\"Time\")\n",
+ " # Add time window constraints for each location except depot.\n",
+ " for location_idx, time_window in enumerate(data[\"time_windows\"]):\n",
+ " if location_idx == data[\"depot\"]:\n",
+ " continue\n",
+ " index = manager.NodeToIndex(location_idx)\n",
+ " time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])\n",
+ " # Add time window constraints for each vehicle start node.\n",
+ " depot_idx = data[\"depot\"]\n",
+ " for vehicle_id in range(data[\"num_vehicles\"]):\n",
+ " index = routing.Start(vehicle_id)\n",
+ " time_dimension.CumulVar(index).SetRange(\n",
+ " data[\"time_windows\"][depot_idx][0], data[\"time_windows\"][depot_idx][1]\n",
+ " )\n",
+ "\n",
+ " # Instantiate route start and end times to produce feasible times.\n",
+ " for i in range(data[\"num_vehicles\"]):\n",
+ " routing.AddVariableMinimizedByFinalizer(\n",
+ " time_dimension.CumulVar(routing.Start(i))\n",
+ " )\n",
+ " routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n",
+ "\n",
+ " # Add Capacity constraint.\n",
+ " def demand_callback(from_index):\n",
+ " \"\"\"Returns the demand of the node.\"\"\"\n",
+ " # Convert from routing variable Index to demands NodeIndex.\n",
+ " from_node = manager.IndexToNode(from_index)\n",
+ " return data[\"demands\"][from_node]\n",
+ "\n",
+ " demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)\n",
+ " routing.AddDimensionWithVehicleCapacity(\n",
+ " demand_callback_index,\n",
+ " 0, # null capacity slack\n",
+ " data[\"vehicle_capacities\"], # vehicle maximum capacities\n",
+ " True, # start cumul to zero\n",
+ " \"Capacity\",\n",
+ " )\n",
+ "\n",
+ " # Setting first solution heuristic.\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
+ " search_parameters.first_solution_strategy = (\n",
+ " FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
+ " )\n",
+ " search_parameters.local_search_metaheuristic = (\n",
+ " LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " )\n",
+ " search_parameters.time_limit.FromSeconds(3)\n",
+ "\n",
+ " # Solve the problem.\n",
+ " solution = routing.SolveWithParameters(search_parameters)\n",
+ "\n",
+ " # Print solution on console.\n",
+ " print_solution(manager, routing, solution)\n",
+ "\n",
+ "\n",
+ "main()\n",
+ "\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/notebook/constraint_solver/cvrptw_break.ipynb b/examples/notebook/routing/cvrptw_break.ipynb
similarity index 95%
rename from examples/notebook/constraint_solver/cvrptw_break.ipynb
rename to examples/notebook/routing/cvrptw_break.ipynb
index ad4a747083e..cf0f75be8ca 100644
--- a/examples/notebook/constraint_solver/cvrptw_break.ipynb
+++ b/examples/notebook/routing/cvrptw_break.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -92,9 +92,8 @@
"outputs": [],
"source": [
"import functools\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -350,12 +349,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" data[\"numlocations_\"], data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Define weight of each edge\n",
" distance_evaluator_index = routing.RegisterTransitCallback(\n",
@@ -401,9 +400,9 @@
" )\n",
"\n",
" # Setting first solution heuristic (cheapest addition).\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" ) # pylint: disable=no-member\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/simple_routing_program.ipynb b/examples/notebook/routing/simple_routing_program.ipynb
similarity index 83%
rename from examples/notebook/constraint_solver/simple_routing_program.ipynb
rename to examples/notebook/routing/simple_routing_program.ipynb
index 57d54942cd5..d00e47206a0 100644
--- a/examples/notebook/constraint_solver/simple_routing_program.ipynb
+++ b/examples/notebook/routing/simple_routing_program.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def main():\n",
@@ -96,10 +95,10 @@
" depot = 0\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(num_locations, num_vehicles, depot)\n",
+ " manager = pywraprouting.RoutingIndexManager(num_locations, num_vehicles, depot)\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -115,9 +114,9 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" ) # pylint: disable=no-member\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/routing/tsp.ipynb b/examples/notebook/routing/tsp.ipynb
new file mode 100644
index 00000000000..21356a6db6c
--- /dev/null
+++ b/examples/notebook/routing/tsp.ipynb
@@ -0,0 +1,217 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "google",
+ "metadata": {},
+ "source": [
+ "##### Copyright 2025 Google LLC."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "apache",
+ "metadata": {},
+ "source": [
+ "Licensed under the Apache License, Version 2.0 (the \"License\");\n",
+ "you may not use this file except in compliance with the License.\n",
+ "You may obtain a copy of the License at\n",
+ "\n",
+ " http://www.apache.org/licenses/LICENSE-2.0\n",
+ "\n",
+ "Unless required by applicable law or agreed to in writing, software\n",
+ "distributed under the License is distributed on an \"AS IS\" BASIS,\n",
+ "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
+ "See the License for the specific language governing permissions and\n",
+ "limitations under the License.\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "basename",
+ "metadata": {},
+ "source": [
+ "# tsp"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "link",
+ "metadata": {},
+ "source": [
+ ""
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "doc",
+ "metadata": {},
+ "source": [
+ "First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "install",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install ortools"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "description",
+ "metadata": {},
+ "source": [
+ "\n",
+ "Simple Travelling Salesman Problem.\n",
+ "\n",
+ "A description of the problem can be found here:\n",
+ "http://en.wikipedia.org/wiki/Travelling_salesperson_problem.\n",
+ "\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "code",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import parameters_pb2\n",
+ "from ortools.routing.python import model\n",
+ "\n",
+ "FirstSolutionStrategy = enums_pb2.FirstSolutionStrategy\n",
+ "RoutingSearchStatus = enums_pb2.RoutingSearchStatus\n",
+ "\n",
+ "\n",
+ "def create_data_model():\n",
+ " \"\"\"Stores the data for the problem.\"\"\"\n",
+ " data = {}\n",
+ " # Locations in block units\n",
+ " locations = [\n",
+ " # fmt:off\n",
+ " (4, 4), # depot\n",
+ " (2, 0), (8, 0), # locations to visit\n",
+ " (0, 1), (1, 1),\n",
+ " (5, 2), (7, 2),\n",
+ " (3, 3), (6, 3),\n",
+ " (5, 5), (8, 5),\n",
+ " (1, 6), (2, 6),\n",
+ " (3, 7), (6, 7),\n",
+ " (0, 8), (7, 8)\n",
+ " # fmt:on\n",
+ " ]\n",
+ " # Convert locations in meters using a city block dimension of 114m x 80m.\n",
+ " data[\"locations\"] = [(l[0] * 114, l[1] * 80) for l in locations]\n",
+ " data[\"num_vehicles\"] = 1\n",
+ " data[\"depot\"] = 0\n",
+ " return data\n",
+ "\n",
+ "\n",
+ "def create_distance_callback(data, manager):\n",
+ " \"\"\"Creates callback to return distance between points.\"\"\"\n",
+ " distances_ = {}\n",
+ " index_manager_ = manager\n",
+ " # precompute distance between location to have distance callback in O(1)\n",
+ " for from_counter, from_node in enumerate(data[\"locations\"]):\n",
+ " distances_[from_counter] = {}\n",
+ " for to_counter, to_node in enumerate(data[\"locations\"]):\n",
+ " if from_counter == to_counter:\n",
+ " distances_[from_counter][to_counter] = 0\n",
+ " else:\n",
+ " distances_[from_counter][to_counter] = abs(\n",
+ " from_node[0] - to_node[0]\n",
+ " ) + abs(from_node[1] - to_node[1])\n",
+ "\n",
+ " def distance_callback(from_index, to_index):\n",
+ " \"\"\"Returns the manhattan distance between the two nodes.\"\"\"\n",
+ " # Convert from routing variable Index to distance matrix NodeIndex.\n",
+ " from_node = index_manager_.index_to_node(from_index)\n",
+ " to_node = index_manager_.index_to_node(to_index)\n",
+ " return distances_[from_node][to_node]\n",
+ "\n",
+ " return distance_callback\n",
+ "\n",
+ "\n",
+ "def print_solution(manager, routing, solution):\n",
+ " \"\"\"Prints assignment on console.\"\"\"\n",
+ " status = routing.status()\n",
+ " print(f\"Status: {RoutingSearchStatus.Value.Name(status)}\")\n",
+ " if (\n",
+ " status != RoutingSearchStatus.ROUTING_OPTIMAL\n",
+ " and status != RoutingSearchStatus.ROUTING_SUCCESS\n",
+ " ):\n",
+ " print(\"No solution found!\")\n",
+ " return\n",
+ " print(f\"Objective: {solution.objective_value()}\")\n",
+ " index = routing.start(0)\n",
+ " plan_output = \"Route for vehicle 0:\\n\"\n",
+ " route_distance = 0\n",
+ " while not routing.is_end(index):\n",
+ " plan_output += f\" {manager.index_to_node(index)} ->\"\n",
+ " previous_index = index\n",
+ " index = solution.value(routing.next_var(index))\n",
+ " route_distance += routing.get_arc_cost_for_vehicle(previous_index, index, 0)\n",
+ " plan_output += f\" {manager.index_to_node(index)}\\n\"\n",
+ " plan_output += f\"Distance of the route: {route_distance}m\\n\"\n",
+ " print(plan_output)\n",
+ "\n",
+ "\n",
+ "def main():\n",
+ " \"\"\"Entry point of the program.\"\"\"\n",
+ " # Instantiate the data problem.\n",
+ " data = create_data_model()\n",
+ "\n",
+ " # Create the routing index manager.\n",
+ " manager = model.RoutingIndexManager(\n",
+ " len(data[\"locations\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
+ " )\n",
+ "\n",
+ " # Create Routing Model.\n",
+ " routing = model.RoutingModel(manager)\n",
+ "\n",
+ " # Create and register a transit callback.\n",
+ " distance_callback = create_distance_callback(data, manager)\n",
+ " transit_callback_index = routing.register_transit_callback(distance_callback)\n",
+ "\n",
+ " # Define cost of each arc.\n",
+ " routing.set_arc_cost_evaluator_of_all_vehicles(transit_callback_index)\n",
+ "\n",
+ " # Setting first solution heuristic.\n",
+ " search_parameters: parameters_pb2.RoutingSearchParameters = (\n",
+ " model.default_routing_search_parameters()\n",
+ " )\n",
+ " search_parameters.first_solution_strategy = FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ "\n",
+ " # Solve the problem.\n",
+ " solution = routing.solve()\n",
+ " # solution = routing.solve_with_parameters(search_parameters)\n",
+ "\n",
+ " # Print solution on console.\n",
+ " print_solution(manager, routing, solution)\n",
+ "\n",
+ "\n",
+ "main()\n",
+ "\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "language_info": {
+ "name": "python"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/examples/notebook/constraint_solver/tsp_circuit_board.ipynb b/examples/notebook/routing/tsp_circuit_board.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/tsp_circuit_board.ipynb
rename to examples/notebook/routing/tsp_circuit_board.ipynb
index 7cadec642c2..1fe1bf4bfa8 100644
--- a/examples/notebook/constraint_solver/tsp_circuit_board.ipynb
+++ b/examples/notebook/routing/tsp_circuit_board.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -84,9 +84,8 @@
"outputs": [],
"source": [
"import math\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -177,8 +176,8 @@
" index = solution.Value(routing.NextVar(index))\n",
" route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n",
" plan_output += f\" {manager.IndexToNode(index)}\\n\"\n",
+ " plan_output += f\"Route distance: {route_distance}mm\\n\"\n",
" print(plan_output)\n",
- " plan_output += f\"Objective: {route_distance}m\\n\"\n",
"\n",
"\n",
"def main():\n",
@@ -187,12 +186,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"locations\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" distance_matrix = compute_euclidean_distance_matrix(data[\"locations\"])\n",
"\n",
@@ -209,9 +208,9 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/tsp_cities.ipynb b/examples/notebook/routing/tsp_cities.ipynb
similarity index 88%
rename from examples/notebook/constraint_solver/tsp_cities.ipynb
rename to examples/notebook/routing/tsp_cities.ipynb
index 1f05f969441..b9cac5110a1 100644
--- a/examples/notebook/constraint_solver/tsp_cities.ipynb
+++ b/examples/notebook/routing/tsp_cities.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -133,12 +132,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" def distance_callback(from_index, to_index):\n",
@@ -154,9 +153,9 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/tsp_distance_matrix.ipynb b/examples/notebook/routing/tsp_distance_matrix.ipynb
similarity index 89%
rename from examples/notebook/constraint_solver/tsp_distance_matrix.ipynb
rename to examples/notebook/routing/tsp_distance_matrix.ipynb
index da4b7f4e199..1fc7907caf3 100644
--- a/examples/notebook/constraint_solver/tsp_distance_matrix.ipynb
+++ b/examples/notebook/routing/tsp_distance_matrix.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -139,12 +138,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -160,9 +159,9 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/tsp.ipynb b/examples/notebook/routing/tsp_legacy.ipynb
similarity index 79%
rename from examples/notebook/constraint_solver/tsp.ipynb
rename to examples/notebook/routing/tsp_legacy.ipynb
index 6021ea54fe2..3b35500736b 100644
--- a/examples/notebook/constraint_solver/tsp.ipynb
+++ b/examples/notebook/routing/tsp_legacy.ipynb
@@ -31,7 +31,7 @@
"id": "basename",
"metadata": {},
"source": [
- "# tsp"
+ "# tsp_legacy"
]
},
{
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -87,9 +87,11 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
+ "FirstSolutionStrategy = enums_pb2.FirstSolutionStrategy\n",
+ "RoutingSearchStatus = enums_pb2.RoutingSearchStatus\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -141,16 +143,24 @@
" return distance_callback\n",
"\n",
"\n",
- "def print_solution(manager, routing, assignment):\n",
+ "def print_solution(manager, routing, solution):\n",
" \"\"\"Prints assignment on console.\"\"\"\n",
- " print(f\"Objective: {assignment.ObjectiveValue()}\")\n",
+ " status = routing.status()\n",
+ " print(f\"Status: {RoutingSearchStatus.Value.Name(status)}\")\n",
+ " if (\n",
+ " status != RoutingSearchStatus.ROUTING_OPTIMAL\n",
+ " and status != RoutingSearchStatus.ROUTING_SUCCESS\n",
+ " ):\n",
+ " print(\"No solution found!\")\n",
+ " return\n",
+ " print(f\"Objective: {solution.ObjectiveValue()}\")\n",
" index = routing.Start(0)\n",
" plan_output = \"Route for vehicle 0:\\n\"\n",
" route_distance = 0\n",
" while not routing.IsEnd(index):\n",
" plan_output += f\" {manager.IndexToNode(index)} ->\"\n",
" previous_index = index\n",
- " index = assignment.Value(routing.NextVar(index))\n",
+ " index = solution.Value(routing.NextVar(index))\n",
" route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)\n",
" plan_output += f\" {manager.IndexToNode(index)}\\n\"\n",
" plan_output += f\"Distance of the route: {route_distance}m\\n\"\n",
@@ -163,12 +173,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"locations\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" distance_callback = create_distance_callback(data, manager)\n",
@@ -178,17 +188,14 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
- " search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
- " )\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
+ " search_parameters.first_solution_strategy = FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
"\n",
" # Solve the problem.\n",
- " assignment = routing.SolveWithParameters(search_parameters)\n",
+ " solution = routing.SolveWithParameters(search_parameters)\n",
"\n",
" # Print solution on console.\n",
- " if assignment:\n",
- " print_solution(manager, routing, assignment)\n",
+ " print_solution(manager, routing, solution)\n",
"\n",
"\n",
"main()\n",
diff --git a/examples/notebook/constraint_solver/vrp.ipynb b/examples/notebook/routing/vrp.ipynb
similarity index 83%
rename from examples/notebook/constraint_solver/vrp.ipynb
rename to examples/notebook/routing/vrp.ipynb
index 8f3808c22a3..9f644488a2a 100644
--- a/examples/notebook/constraint_solver/vrp.ipynb
+++ b/examples/notebook/routing/vrp.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -91,9 +91,11 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
+ "FirstSolutionStrategy = enums_pb2.FirstSolutionStrategy\n",
+ "RoutingSearchStatus = enums_pb2.RoutingSearchStatus\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -125,8 +127,16 @@
" return data\n",
"\n",
"\n",
- "def print_solution(data, manager, routing, solution):\n",
- " \"\"\"Prints solution on console.\"\"\"\n",
+ "def print_solution(manager, routing, solution):\n",
+ " \"\"\"Prints assignment on console.\"\"\"\n",
+ " status = routing.status()\n",
+ " print(f\"Status: {RoutingSearchStatus.Value.Name(status)}\")\n",
+ " if (\n",
+ " status != RoutingSearchStatus.ROUTING_OPTIMAL\n",
+ " and status != RoutingSearchStatus.ROUTING_SUCCESS\n",
+ " ):\n",
+ " print(\"No solution found!\")\n",
+ " return\n",
" print(f\"Objective: {solution.ObjectiveValue()}\")\n",
" total_distance = 0\n",
" for vehicle_index in range(manager.GetNumberOfVehicles()):\n",
@@ -156,12 +166,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -177,19 +187,14 @@
" routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
- " search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
- " )\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
+ " search_parameters.first_solution_strategy = FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
"\n",
" # Solve the problem.\n",
" solution = routing.SolveWithParameters(search_parameters)\n",
"\n",
" # Print solution on console.\n",
- " if solution:\n",
- " print_solution(data, manager, routing, solution)\n",
- " else:\n",
- " print(\"No solution found !\")\n",
+ " print_solution(manager, routing, solution)\n",
"\n",
"\n",
"main()\n",
diff --git a/examples/notebook/constraint_solver/vrp_breaks.ipynb b/examples/notebook/routing/vrp_breaks.ipynb
similarity index 89%
rename from examples/notebook/constraint_solver/vrp_breaks.ipynb
rename to examples/notebook/routing/vrp_breaks.ipynb
index 607fd6aaaef..99ce9691934 100644
--- a/examples/notebook/constraint_solver/vrp_breaks.ipynb
+++ b/examples/notebook/routing/vrp_breaks.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -75,12 +75,12 @@
"\n",
"Vehicle Routing Problem (VRP) with breaks.\n",
"\n",
- " This is a sample using the routing library python wrapper to solve a VRP\n",
- " problem.\n",
- " A description of the problem can be found here:\n",
- " http://en.wikipedia.org/wiki/Vehicle_routing_problem.\n",
+ "This is a sample using the routing library python wrapper to solve a VRP\n",
+ "problem.\n",
+ "A description of the problem can be found here:\n",
+ "http://en.wikipedia.org/wiki/Vehicle_routing_problem.\n",
"\n",
- " Durations are in minutes.\n",
+ "Durations are in minutes.\n",
"\n"
]
},
@@ -91,9 +91,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -170,12 +169,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def time_callback(from_index, to_index):\n",
@@ -225,12 +224,12 @@
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" # search_parameters.log_search = True\n",
" search_parameters.time_limit.FromSeconds(2)\n",
diff --git a/examples/notebook/constraint_solver/vrp_breaks_from_start.ipynb b/examples/notebook/routing/vrp_breaks_from_start.ipynb
similarity index 92%
rename from examples/notebook/constraint_solver/vrp_breaks_from_start.ipynb
rename to examples/notebook/routing/vrp_breaks_from_start.ipynb
index 354ee332d2f..c4e5aa129a1 100644
--- a/examples/notebook/constraint_solver/vrp_breaks_from_start.ipynb
+++ b/examples/notebook/routing/vrp_breaks_from_start.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -92,8 +92,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"\n",
@@ -174,12 +174,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def time_callback(from_index, to_index):\n",
@@ -233,12 +233,12 @@
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" # search_parameters.log_search = True\n",
" search_parameters.time_limit.FromSeconds(2)\n",
diff --git a/examples/notebook/constraint_solver/vrp_capacity.ipynb b/examples/notebook/routing/vrp_capacity.ipynb
similarity index 90%
rename from examples/notebook/constraint_solver/vrp_capacity.ipynb
rename to examples/notebook/routing/vrp_capacity.ipynb
index 66bfc3158bf..82106b3fc07 100644
--- a/examples/notebook/constraint_solver/vrp_capacity.ipynb
+++ b/examples/notebook/routing/vrp_capacity.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -156,12 +155,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -193,12 +192,12 @@
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(1)\n",
"\n",
diff --git a/examples/notebook/constraint_solver/vrp_drop_nodes.ipynb b/examples/notebook/routing/vrp_drop_nodes.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/vrp_drop_nodes.ipynb
rename to examples/notebook/routing/vrp_drop_nodes.ipynb
index 19f5d019b5f..f7dfee1d5df 100644
--- a/examples/notebook/constraint_solver/vrp_drop_nodes.ipynb
+++ b/examples/notebook/routing/vrp_drop_nodes.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -165,12 +164,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -206,12 +205,12 @@
" routing.AddDisjunction([manager.NodeToIndex(node)], penalty)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(1)\n",
"\n",
diff --git a/examples/notebook/constraint_solver/vrp_global_span.ipynb b/examples/notebook/routing/vrp_global_span.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/vrp_global_span.ipynb
rename to examples/notebook/routing/vrp_global_span.ipynb
index a652e64d266..0b844c935be 100644
--- a/examples/notebook/constraint_solver/vrp_global_span.ipynb
+++ b/examples/notebook/routing/vrp_global_span.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -91,9 +91,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -156,12 +155,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -189,9 +188,9 @@
" distance_dimension.SetGlobalSpanCostCoefficient(100)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_initial_routes.ipynb b/examples/notebook/routing/vrp_initial_routes.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/vrp_initial_routes.ipynb
rename to examples/notebook/routing/vrp_initial_routes.ipynb
index 1a81fc82464..4433aa8d3ed 100644
--- a/examples/notebook/constraint_solver/vrp_initial_routes.ipynb
+++ b/examples/notebook/routing/vrp_initial_routes.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -156,12 +155,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -189,12 +188,12 @@
" distance_dimension.SetGlobalSpanCostCoefficient(100)\n",
"\n",
" # Close model with the custom search parameters.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(5)\n",
" # When an initial solution is given for search, the model will be closed with\n",
diff --git a/examples/notebook/constraint_solver/vrp_items_to_deliver.ipynb b/examples/notebook/routing/vrp_items_to_deliver.ipynb
similarity index 96%
rename from examples/notebook/constraint_solver/vrp_items_to_deliver.ipynb
rename to examples/notebook/routing/vrp_items_to_deliver.ipynb
index a75bf948937..7f20d5d50b6 100644
--- a/examples/notebook/constraint_solver/vrp_items_to_deliver.ipynb
+++ b/examples/notebook/routing/vrp_items_to_deliver.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -93,9 +93,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -532,7 +531,7 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]),\n",
" data[\"num_vehicles\"],\n",
" data[\"starts\"],\n",
@@ -540,7 +539,7 @@
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Create and register a transit callback.\n",
@@ -627,12 +626,12 @@
" routing.AddDisjunction([manager.NodeToIndex(node)], penalty)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" # Sets a time limit; default is 100 milliseconds.\n",
" # search_parameters.log_search = True\n",
diff --git a/examples/notebook/constraint_solver/vrp_node_max.ipynb b/examples/notebook/routing/vrp_node_max.ipynb
similarity index 93%
rename from examples/notebook/constraint_solver/vrp_node_max.ipynb
rename to examples/notebook/routing/vrp_node_max.ipynb
index c5f2e9b4538..ec8f59d502a 100644
--- a/examples/notebook/constraint_solver/vrp_node_max.ipynb
+++ b/examples/notebook/routing/vrp_node_max.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -87,9 +87,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -191,13 +190,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
- "\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -293,12 +291,12 @@
" dim_two.SetCumulVarSoftUpperBound(end, 0, 4200)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" # search_parameters.log_search = True\n",
" search_parameters.time_limit.FromSeconds(5)\n",
diff --git a/examples/notebook/constraint_solver/vrp_nodes_indices.ipynb b/examples/notebook/routing/vrp_nodes_indices.ipynb
similarity index 90%
rename from examples/notebook/constraint_solver/vrp_nodes_indices.ipynb
rename to examples/notebook/routing/vrp_nodes_indices.ipynb
index a795ca98bbb..2228f39731e 100644
--- a/examples/notebook/constraint_solver/vrp_nodes_indices.ipynb
+++ b/examples/notebook/routing/vrp_nodes_indices.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -110,8 +110,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def main():\n",
@@ -122,8 +121,8 @@
" vehicles = len(starts)\n",
" assert len(starts) == len(ends)\n",
"\n",
- " manager = pywrapcp.RoutingIndexManager(locations, vehicles, starts, ends)\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " manager = pywraprouting.RoutingIndexManager(locations, vehicles, starts, ends)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" print(\"Starts/Ends:\")\n",
" header = \"| |\"\n",
diff --git a/examples/notebook/constraint_solver/vrp_pickup_delivery.ipynb b/examples/notebook/routing/vrp_pickup_delivery.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/vrp_pickup_delivery.ipynb
rename to examples/notebook/routing/vrp_pickup_delivery.ipynb
index 8bf90a41ab7..3ad66bcaf37 100644
--- a/examples/notebook/constraint_solver/vrp_pickup_delivery.ipynb
+++ b/examples/notebook/routing/vrp_pickup_delivery.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -157,12 +156,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Define cost of each arc.\n",
@@ -202,9 +201,9 @@
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
+ " enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_pickup_delivery_fifo.ipynb b/examples/notebook/routing/vrp_pickup_delivery_fifo.ipynb
similarity index 90%
rename from examples/notebook/constraint_solver/vrp_pickup_delivery_fifo.ipynb
rename to examples/notebook/routing/vrp_pickup_delivery_fifo.ipynb
index e783169c6be..e07ccf7ff88 100644
--- a/examples/notebook/constraint_solver/vrp_pickup_delivery_fifo.ipynb
+++ b/examples/notebook/routing/vrp_pickup_delivery_fifo.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -157,12 +156,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Define cost of each arc.\n",
@@ -201,13 +200,13 @@
" <= distance_dimension.CumulVar(delivery_index)\n",
" )\n",
" routing.SetPickupAndDeliveryPolicyOfAllVehicles(\n",
- " pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_FIFO\n",
+ " pywraprouting.RoutingModel.PICKUP_AND_DELIVERY_FIFO\n",
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
+ " enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_pickup_delivery_lifo.ipynb b/examples/notebook/routing/vrp_pickup_delivery_lifo.ipynb
similarity index 90%
rename from examples/notebook/constraint_solver/vrp_pickup_delivery_lifo.ipynb
rename to examples/notebook/routing/vrp_pickup_delivery_lifo.ipynb
index 7ec8bfee398..e66c68c509e 100644
--- a/examples/notebook/constraint_solver/vrp_pickup_delivery_lifo.ipynb
+++ b/examples/notebook/routing/vrp_pickup_delivery_lifo.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -157,12 +156,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Define cost of each arc.\n",
@@ -201,13 +200,13 @@
" <= distance_dimension.CumulVar(delivery_index)\n",
" )\n",
" routing.SetPickupAndDeliveryPolicyOfAllVehicles(\n",
- " pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_LIFO\n",
+ " pywraprouting.RoutingModel.PICKUP_AND_DELIVERY_LIFO\n",
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
+ " enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_resources.ipynb b/examples/notebook/routing/vrp_resources.ipynb
similarity index 93%
rename from examples/notebook/constraint_solver/vrp_resources.ipynb
rename to examples/notebook/routing/vrp_resources.ipynb
index c861e6a3a81..b8b57a1f764 100644
--- a/examples/notebook/constraint_solver/vrp_resources.ipynb
+++ b/examples/notebook/routing/vrp_resources.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -172,12 +171,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def time_callback(from_index, to_index):\n",
@@ -249,9 +248,9 @@
" routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_solution_callback.ipynb b/examples/notebook/routing/vrp_solution_callback.ipynb
similarity index 87%
rename from examples/notebook/constraint_solver/vrp_solution_callback.ipynb
rename to examples/notebook/routing/vrp_solution_callback.ipynb
index b66d471977f..95aedc93987 100644
--- a/examples/notebook/constraint_solver/vrp_solution_callback.ipynb
+++ b/examples/notebook/routing/vrp_solution_callback.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -93,9 +93,8 @@
"source": [
"import weakref\n",
"\n",
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -128,7 +127,8 @@
"\n",
"\n",
"def print_solution(\n",
- " routing_manager: pywrapcp.RoutingIndexManager, routing_model: pywrapcp.RoutingModel\n",
+ " routing_manager: pywraprouting.RoutingIndexManager,\n",
+ " routing_model: pywraprouting.RoutingModel,\n",
"):\n",
" \"\"\"Prints solution on console.\"\"\"\n",
" print(\"################\")\n",
@@ -160,8 +160,8 @@
"\n",
" def __init__(\n",
" self,\n",
- " manager: pywrapcp.RoutingIndexManager,\n",
- " model: pywrapcp.RoutingModel,\n",
+ " manager: pywraprouting.RoutingIndexManager,\n",
+ " model: pywraprouting.RoutingModel,\n",
" limit: int,\n",
" ):\n",
" # We need a weak ref on the routing model to avoid a cycle.\n",
@@ -177,11 +177,12 @@
" ) # pytype: disable=attribute-error\n",
" if not self.objectives or objective < self.objectives[-1]:\n",
" self.objectives.append(objective)\n",
- " print_solution(self._routing_manager_ref(), self._routing_model_ref())\n",
+ " print_solution(\n",
+ " self._routing_manager_ref(), self._routing_model_ref()\n",
+ " ) # pytype: disable=attribute-error\n",
" self._counter += 1\n",
" if self._counter > self._counter_limit:\n",
- " self._routing_model_ref().solver().FinishCurrentSearch()\n",
- "\n",
+ " self._routing_model_ref().solver().FinishCurrentSearch() # pytype: disable=attribute-error\n",
"\n",
"\n",
"\n",
@@ -191,12 +192,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " routing_manager = pywrapcp.RoutingIndexManager(\n",
+ " routing_manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing_model = pywrapcp.RoutingModel(routing_manager)\n",
+ " routing_model = pywraprouting.RoutingModel(routing_manager)\n",
"\n",
"\n",
" # Create and register a transit callback.\n",
@@ -229,12 +230,12 @@
" routing_model.AddAtSolutionCallback(solution_callback)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(5)\n",
"\n",
diff --git a/examples/notebook/constraint_solver/vrp_starts_ends.ipynb b/examples/notebook/routing/vrp_starts_ends.ipynb
similarity index 89%
rename from examples/notebook/constraint_solver/vrp_starts_ends.ipynb
rename to examples/notebook/routing/vrp_starts_ends.ipynb
index 1f3a3f3e0db..8450579d020 100644
--- a/examples/notebook/constraint_solver/vrp_starts_ends.ipynb
+++ b/examples/notebook/routing/vrp_starts_ends.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -148,12 +147,15 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
- " len(data[\"distance_matrix\"]), data[\"num_vehicles\"], data[\"starts\"], data[\"ends\"]\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
+ " len(data[\"distance_matrix\"]),\n",
+ " data[\"num_vehicles\"],\n",
+ " data[\"starts\"],\n",
+ " data[\"ends\"],\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -181,9 +183,9 @@
" distance_dimension.SetGlobalSpanCostCoefficient(100)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_time_windows.ipynb b/examples/notebook/routing/vrp_time_windows.ipynb
similarity index 92%
rename from examples/notebook/constraint_solver/vrp_time_windows.ipynb
rename to examples/notebook/routing/vrp_time_windows.ipynb
index 516707633bd..460da3387be 100644
--- a/examples/notebook/constraint_solver/vrp_time_windows.ipynb
+++ b/examples/notebook/routing/vrp_time_windows.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -169,12 +168,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def time_callback(from_index, to_index):\n",
@@ -221,9 +220,9 @@
" routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/notebook/constraint_solver/vrp_time_windows_per_vehicles.ipynb b/examples/notebook/routing/vrp_time_windows_per_vehicles.ipynb
similarity index 93%
rename from examples/notebook/constraint_solver/vrp_time_windows_per_vehicles.ipynb
rename to examples/notebook/routing/vrp_time_windows_per_vehicles.ipynb
index 205d5d7b19f..8240bda85c9 100644
--- a/examples/notebook/constraint_solver/vrp_time_windows_per_vehicles.ipynb
+++ b/examples/notebook/routing/vrp_time_windows_per_vehicles.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -97,9 +97,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -187,12 +186,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" 1 + 16 * 4, data[\"num_vehicles\"], data[\"depot\"] # number of locations\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Create and register a transit callback.\n",
@@ -276,12 +275,12 @@
" routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(1)\n",
"\n",
diff --git a/examples/notebook/constraint_solver/vrp_tokens.ipynb b/examples/notebook/routing/vrp_tokens.ipynb
similarity index 91%
rename from examples/notebook/constraint_solver/vrp_tokens.ipynb
rename to examples/notebook/routing/vrp_tokens.ipynb
index cea5aa12cfd..776ecdb4aa3 100644
--- a/examples/notebook/constraint_solver/vrp_tokens.ipynb
+++ b/examples/notebook/routing/vrp_tokens.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -161,12 +160,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"tokens\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def distance_callback(from_index, to_index):\n",
@@ -221,12 +220,12 @@
" )\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.time_limit.FromSeconds(1)\n",
"\n",
diff --git a/examples/notebook/constraint_solver/vrp_with_time_limit.ipynb b/examples/notebook/routing/vrp_with_time_limit.ipynb
similarity index 85%
rename from examples/notebook/constraint_solver/vrp_with_time_limit.ipynb
rename to examples/notebook/routing/vrp_with_time_limit.ipynb
index cbafba12e8b..8c0410f1b83 100644
--- a/examples/notebook/constraint_solver/vrp_with_time_limit.ipynb
+++ b/examples/notebook/routing/vrp_with_time_limit.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def print_solution(manager, routing, solution):\n",
@@ -120,10 +119,10 @@
" depot = 0\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(num_locations, num_vehicles, depot)\n",
+ " manager = pywraprouting.RoutingIndexManager(num_locations, num_vehicles, depot)\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
"\n",
" # Create and register a transit callback.\n",
@@ -150,12 +149,12 @@
" distance_dimension.SetGlobalSpanCostCoefficient(100)\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
" search_parameters.local_search_metaheuristic = (\n",
- " routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
+ " enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH\n",
" )\n",
" search_parameters.log_search = True\n",
" search_parameters.time_limit.FromSeconds(5)\n",
diff --git a/examples/notebook/constraint_solver/vrptw_store_solution_data.ipynb b/examples/notebook/routing/vrptw_store_solution_data.ipynb
similarity index 93%
rename from examples/notebook/constraint_solver/vrptw_store_solution_data.ipynb
rename to examples/notebook/routing/vrptw_store_solution_data.ipynb
index 558847e5fc3..0389628ffa6 100644
--- a/examples/notebook/constraint_solver/vrptw_store_solution_data.ipynb
+++ b/examples/notebook/routing/vrptw_store_solution_data.ipynb
@@ -41,10 +41,10 @@
"source": [
""
]
@@ -83,9 +83,8 @@
"metadata": {},
"outputs": [],
"source": [
- "from ortools.constraint_solver import routing_enums_pb2\n",
- "from ortools.constraint_solver import pywrapcp\n",
- "\n",
+ "from ortools.routing import enums_pb2\n",
+ "from ortools.routing import pywraprouting\n",
"\n",
"\n",
"def create_data_model():\n",
@@ -217,12 +216,12 @@
" data = create_data_model()\n",
"\n",
" # Create the routing index manager.\n",
- " manager = pywrapcp.RoutingIndexManager(\n",
+ " manager = pywraprouting.RoutingIndexManager(\n",
" len(data[\"time_matrix\"]), data[\"num_vehicles\"], data[\"depot\"]\n",
" )\n",
"\n",
" # Create Routing Model.\n",
- " routing = pywrapcp.RoutingModel(manager)\n",
+ " routing = pywraprouting.RoutingModel(manager)\n",
"\n",
" # Create and register a transit callback.\n",
" def time_callback(from_index, to_index):\n",
@@ -269,9 +268,9 @@
" routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))\n",
"\n",
" # Setting first solution heuristic.\n",
- " search_parameters = pywrapcp.DefaultRoutingSearchParameters()\n",
+ " search_parameters = pywraprouting.DefaultRoutingSearchParameters()\n",
" search_parameters.first_solution_strategy = (\n",
- " routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
+ " enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC\n",
" )\n",
"\n",
" # Solve the problem.\n",
diff --git a/examples/python/appointments.py b/examples/python/appointments.py
index 328ac691f07..7282e43cf3d 100644
--- a/examples/python/appointments.py
+++ b/examples/python/appointments.py
@@ -32,57 +32,61 @@
_LOAD_MIN = flags.DEFINE_integer("load_min", 480, "Minimum load in minutes.")
_LOAD_MAX = flags.DEFINE_integer("load_max", 540, "Maximum load in minutes.")
-_COMMUTE_TIME = flags.DEFINE_integer("commute_time", 30, "Commute time in minutes.")
-_NUM_WORKERS = flags.DEFINE_integer("num_workers", 98, "Maximum number of workers.")
+_COMMUTE_TIME = flags.DEFINE_integer(
+ "commute_time", 30, "Commute time in minutes."
+)
+_NUM_WORKERS = flags.DEFINE_integer(
+ "num_workers", 98, "Maximum number of workers."
+)
class AllSolutionCollector(cp_model.CpSolverSolutionCallback):
- """Stores all solutions."""
+ """Stores all solutions."""
- def __init__(self, variables):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__variables = variables
- self.__collect = []
+ def __init__(self, variables):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__variables = variables
+ self.__collect = []
- def on_solution_callback(self) -> None:
- """Collect a new combination."""
- combination = [self.value(v) for v in self.__variables]
- self.__collect.append(combination)
+ def on_solution_callback(self) -> None:
+ """Collect a new combination."""
+ combination = [self.value(v) for v in self.__variables]
+ self.__collect.append(combination)
- def combinations(self) -> list[list[int]]:
- """Returns all collected combinations."""
- return self.__collect
+ def combinations(self) -> list[list[int]]:
+ """Returns all collected combinations."""
+ return self.__collect
def enumerate_all_knapsacks_with_repetition(
item_sizes: list[int], total_size_min: int, total_size_max: int
) -> list[list[int]]:
- """Enumerate all possible knapsacks with total size in the given range.
-
- Args:
- item_sizes: a list of integers. item_sizes[i] is the size of item #i.
- total_size_min: an integer, the minimum total size.
- total_size_max: an integer, the maximum total size.
-
- Returns:
- The list of all the knapsacks whose total size is in the given inclusive
- range. Each knapsack is a list [#item0, #item1, ... ], where #itemK is an
- nonnegative integer: the number of times we put item #K in the knapsack.
- """
- model = cp_model.CpModel()
- variables = [
- model.new_int_var(0, total_size_max // size, "") for size in item_sizes
- ]
- load = sum(variables[i] * size for i, size in enumerate(item_sizes))
- model.add_linear_constraint(load, total_size_min, total_size_max)
-
- solver = cp_model.CpSolver()
- solution_collector = AllSolutionCollector(variables)
- # Enumerate all solutions.
- solver.parameters.enumerate_all_solutions = True
- # solve
- solver.solve(model, solution_collector)
- return solution_collector.combinations()
+ """Enumerate all possible knapsacks with total size in the given range.
+
+ Args:
+ item_sizes: a list of integers. item_sizes[i] is the size of item #i.
+ total_size_min: an integer, the minimum total size.
+ total_size_max: an integer, the maximum total size.
+
+ Returns:
+ The list of all the knapsacks whose total size is in the given inclusive
+      range. Each knapsack is a list [#item0, #item1, ... ], where #itemK is a
+ nonnegative integer: the number of times we put item #K in the knapsack.
+ """
+ model = cp_model.CpModel()
+ variables = [
+ model.new_int_var(0, total_size_max // size, "") for size in item_sizes
+ ]
+ load = sum(variables[i] * size for i, size in enumerate(item_sizes))
+ model.add_linear_constraint(load, total_size_min, total_size_max)
+
+ solver = cp_model.CpSolver()
+ solution_collector = AllSolutionCollector(variables)
+ # Enumerate all solutions.
+ solver.parameters.enumerate_all_solutions = True
+ # solve
+ solver.solve(model, solution_collector)
+ return solution_collector.combinations()
def aggregate_item_collections_optimally(
@@ -90,186 +94,185 @@ def aggregate_item_collections_optimally(
max_num_collections: int,
ideal_item_ratios: list[float],
) -> list[int]:
- """Selects a set (with repetition) of combination of items optimally.
-
- Given a set of collections of N possible items (in each collection, an item
- may appear multiple times), a given "ideal breakdown of items", and a
- maximum number of collections, this method finds the optimal way to
- aggregate the collections in order to:
- - maximize the overall number of items
- - while keeping the ratio of each item, among the overall selection, as close
- as possible to a given input ratio (which depends on the item).
- Each collection may be selected more than one time.
-
- Args:
- item_collections: a list of item collections. Each item collection is a list
- of integers [#item0, ..., #itemN-1], where #itemK is the number of times
- item #K appears in the collection, and N is the number of distinct items.
- max_num_collections: an integer, the maximum number of item collections that
- may be selected (counting repetitions of the same collection).
- ideal_item_ratios: A list of N float which sums to 1.0: the K-th element is
- the ideal ratio of item #K in the whole aggregated selection.
-
- Returns:
- A pair (objective value, list of pairs (item collection, num_selections)),
- where:
- - "objective value" is the value of the internal objective function used
- by the MIP Solver
- - Each "item collection" is an element of the input item_collections
- - and its associated "num_selections" is the number of times it was
- selected.
- """
- solver = pywraplp.Solver.CreateSolver("SCIP")
- if not solver:
- return []
- n = len(ideal_item_ratios)
- num_distinct_collections = len(item_collections)
- max_num_items_per_collection = 0
- for template in item_collections:
- max_num_items_per_collection = max(max_num_items_per_collection, sum(template))
- upper_bound = max_num_items_per_collection * max_num_collections
-
- # num_selections_of_collection[i] is an IntVar that represents the number
- # of times that we will use collection #i in our global selection.
- num_selections_of_collection = [
- solver.IntVar(0, max_num_collections, "s[%d]" % i)
- for i in range(num_distinct_collections)
- ]
-
- # num_overall_item[i] is an IntVar that represents the total count of item #i,
- # aggregated over all selected collections. This is enforced with dedicated
- # constraints that bind them with the num_selections_of_collection vars.
- num_overall_item = [
- solver.IntVar(0, upper_bound, "num_overall_item[%d]" % i) for i in range(n)
- ]
- for i in range(n):
- ct = solver.Constraint(0.0, 0.0)
- ct.SetCoefficient(num_overall_item[i], -1)
- for j in range(num_distinct_collections):
- ct.SetCoefficient(num_selections_of_collection[j], item_collections[j][i])
-
- # Maintain the num_all_item variable as the sum of all num_overall_item
- # variables.
- num_all_items = solver.IntVar(0, upper_bound, "num_all_items")
- solver.Add(solver.Sum(num_overall_item) == num_all_items)
-
- # Sets the total number of workers.
- solver.Add(solver.Sum(num_selections_of_collection) == max_num_collections)
-
- # Objective variables.
- deviation_vars = [
- solver.NumVar(0, upper_bound, "deviation_vars[%d]" % i) for i in range(n)
- ]
- for i in range(n):
- deviation = deviation_vars[i]
- solver.Add(
- deviation >= num_overall_item[i] - ideal_item_ratios[i] * num_all_items
- )
- solver.Add(
- deviation >= ideal_item_ratios[i] * num_all_items - num_overall_item[i]
- )
-
- solver.Maximize(num_all_items - solver.Sum(deviation_vars))
-
- result_status = solver.Solve()
-
- if result_status == pywraplp.Solver.OPTIMAL:
- # The problem has an optimal solution.
- return [int(v.solution_value()) for v in num_selections_of_collection]
+ """Selects a set (with repetition) of combination of items optimally.
+
+ Given a set of collections of N possible items (in each collection, an item
+ may appear multiple times), a given "ideal breakdown of items", and a
+ maximum number of collections, this method finds the optimal way to
+ aggregate the collections in order to:
+ - maximize the overall number of items
+ - while keeping the ratio of each item, among the overall selection, as close
+ as possible to a given input ratio (which depends on the item).
+ Each collection may be selected more than one time.
+
+ Args:
+ item_collections: a list of item collections. Each item collection is a list
+ of integers [#item0, ..., #itemN-1], where #itemK is the number of times
+ item #K appears in the collection, and N is the number of distinct items.
+ max_num_collections: an integer, the maximum number of item collections that
+ may be selected (counting repetitions of the same collection).
+    ideal_item_ratios: A list of N floats which sums to 1.0: the K-th element is
+ the ideal ratio of item #K in the whole aggregated selection.
+
+ Returns:
+ A pair (objective value, list of pairs (item collection, num_selections)),
+ where:
+ - "objective value" is the value of the internal objective function used
+ by the MIP Solver
+ - Each "item collection" is an element of the input item_collections
+ - and its associated "num_selections" is the number of times it was
+ selected.
+ """
+ solver = pywraplp.Solver.CreateSolver("SCIP")
+ if not solver:
return []
+ n = len(ideal_item_ratios)
+ num_distinct_collections = len(item_collections)
+ max_num_items_per_collection = 0
+ for template in item_collections:
+ max_num_items_per_collection = max(
+ max_num_items_per_collection, sum(template)
+ )
+ upper_bound = max_num_items_per_collection * max_num_collections
+
+ # num_selections_of_collection[i] is an IntVar that represents the number
+ # of times that we will use collection #i in our global selection.
+ num_selections_of_collection = [
+ solver.IntVar(0, max_num_collections, "s[%d]" % i)
+ for i in range(num_distinct_collections)
+ ]
+
+ # num_overall_item[i] is an IntVar that represents the total count of item #i,
+ # aggregated over all selected collections. This is enforced with dedicated
+ # constraints that bind them with the num_selections_of_collection vars.
+ num_overall_item = [
+ solver.IntVar(0, upper_bound, "num_overall_item[%d]" % i)
+ for i in range(n)
+ ]
+ for i in range(n):
+ ct = solver.Constraint(0.0, 0.0)
+ ct.SetCoefficient(num_overall_item[i], -1)
+ for j in range(num_distinct_collections):
+ ct.SetCoefficient(num_selections_of_collection[j], item_collections[j][i])
+
+ # Maintain the num_all_item variable as the sum of all num_overall_item
+ # variables.
+ num_all_items = solver.IntVar(0, upper_bound, "num_all_items")
+ solver.Add(solver.Sum(num_overall_item) == num_all_items)
+
+ # Sets the total number of workers.
+ solver.Add(solver.Sum(num_selections_of_collection) == max_num_collections)
+
+ # Objective variables.
+ deviation_vars = [
+ solver.NumVar(0, upper_bound, "deviation_vars[%d]" % i) for i in range(n)
+ ]
+ for i in range(n):
+ deviation = deviation_vars[i]
+ solver.Add(
+ deviation >= num_overall_item[i] - ideal_item_ratios[i] * num_all_items
+ )
+ solver.Add(
+ deviation >= ideal_item_ratios[i] * num_all_items - num_overall_item[i]
+ )
+
+ solver.Maximize(num_all_items - solver.Sum(deviation_vars))
+
+ result_status = solver.Solve()
+
+ if result_status == pywraplp.Solver.OPTIMAL:
+ # The problem has an optimal solution.
+ return [int(v.solution_value()) for v in num_selections_of_collection]
+ return []
def get_optimal_schedule(
- demand: list[tuple[float, str, int]]
+ demand: list[tuple[float, str, int]],
) -> list[tuple[int, list[tuple[int, str]]]]:
- """Computes the optimal schedule for the installation input.
-
- Args:
- demand: a list of "appointment types". Each "appointment type" is a triple
- (ideal_ratio_pct, name, duration_minutes), where ideal_ratio_pct is the
- ideal percentage (in [0..100.0]) of that type of appointment among all
- appointments scheduled.
-
- Returns:
- The same output type as EnumerateAllKnapsacksWithRepetition.
- """
- combinations = enumerate_all_knapsacks_with_repetition(
- [a[2] + _COMMUTE_TIME.value for a in demand],
- _LOAD_MIN.value,
- _LOAD_MAX.value,
- )
- print(
- (
- "Found %d possible day schedules " % len(combinations)
- + "(i.e. combination of appointments filling up one worker's day)"
- )
- )
-
- selection = aggregate_item_collections_optimally(
- combinations, _NUM_WORKERS.value, [a[0] / 100.0 for a in demand]
- )
- output = []
- for i, s in enumerate(selection):
- if s != 0:
- output.append(
- (
- s,
- [
- (combinations[i][t], d[1])
- for t, d in enumerate(demand)
- if combinations[i][t] != 0
- ],
- )
- )
-
- return output
+ """Computes the optimal schedule for the installation input.
+
+ Args:
+ demand: a list of "appointment types". Each "appointment type" is a triple
+ (ideal_ratio_pct, name, duration_minutes), where ideal_ratio_pct is the
+ ideal percentage (in [0..100.0]) of that type of appointment among all
+ appointments scheduled.
+
+ Returns:
+      The same output type as enumerate_all_knapsacks_with_repetition.
+ """
+ combinations = enumerate_all_knapsacks_with_repetition(
+ [a[2] + _COMMUTE_TIME.value for a in demand],
+ _LOAD_MIN.value,
+ _LOAD_MAX.value,
+ )
+ print((
+ "Found %d possible day schedules " % len(combinations)
+ + "(i.e. combination of appointments filling up one worker's day)"
+ ))
+
+ selection = aggregate_item_collections_optimally(
+ combinations, _NUM_WORKERS.value, [a[0] / 100.0 for a in demand]
+ )
+ output = []
+ for i, s in enumerate(selection):
+ if s != 0:
+ output.append((
+ s,
+ [
+ (combinations[i][t], d[1])
+ for t, d in enumerate(demand)
+ if combinations[i][t] != 0
+ ],
+ ))
+
+ return output
def main(_):
- demand = [(45.0, "Type1", 90), (30.0, "Type2", 120), (25.0, "Type3", 180)]
- print("*** input problem ***")
- print("Appointments: ")
- for a in demand:
- print(" %.2f%% of %s : %d min" % (a[0], a[1], a[2]))
- print("Commute time = %d" % _COMMUTE_TIME.value)
- print(
- "Acceptable duration of a work day = [%d..%d]"
- % (_LOAD_MIN.value, _LOAD_MAX.value)
- )
- print("%d workers" % _NUM_WORKERS.value)
- selection = get_optimal_schedule(demand)
- print()
- installed = 0
- installed_per_type = {}
- for a in demand:
- installed_per_type[a[1]] = 0
-
- # [START print_solution]
- print("*** output solution ***")
- for template in selection:
- num_instances = template[0]
- print("%d schedules with " % num_instances)
- for t in template[1]:
- mult = t[0]
- print(" %d installation of type %s" % (mult, t[1]))
- installed += num_instances * mult
- installed_per_type[t[1]] += num_instances * mult
-
- print()
- print("%d installations planned" % installed)
- for a in demand:
- name = a[1]
- per_type = installed_per_type[name]
- if installed != 0:
- print(
- f" {per_type} ({per_type * 100.0 / installed}%) installations of"
- f" type {name} planned"
- )
- else:
- print(f" {per_type} installations of type {name} planned")
- # [END print_solution]
+ demand = [(45.0, "Type1", 90), (30.0, "Type2", 120), (25.0, "Type3", 180)]
+ print("*** input problem ***")
+ print("Appointments: ")
+ for a in demand:
+ print(" %.2f%% of %s : %d min" % (a[0], a[1], a[2]))
+ print("Commute time = %d" % _COMMUTE_TIME.value)
+ print(
+ "Acceptable duration of a work day = [%d..%d]"
+ % (_LOAD_MIN.value, _LOAD_MAX.value)
+ )
+ print("%d workers" % _NUM_WORKERS.value)
+ selection = get_optimal_schedule(demand)
+ print()
+ installed = 0
+ installed_per_type = {}
+ for a in demand:
+ installed_per_type[a[1]] = 0
+
+ # [START print_solution]
+ print("*** output solution ***")
+ for template in selection:
+ num_instances = template[0]
+ print("%d schedules with " % num_instances)
+ for t in template[1]:
+ mult = t[0]
+ print(" %d installation of type %s" % (mult, t[1]))
+ installed += num_instances * mult
+ installed_per_type[t[1]] += num_instances * mult
+
+ print()
+ print("%d installations planned" % installed)
+ for a in demand:
+ name = a[1]
+ per_type = installed_per_type[name]
+ if installed != 0:
+ print(
+ f" {per_type} ({per_type * 100.0 / installed}%) installations of"
+ f" type {name} planned"
+ )
+ else:
+ print(f" {per_type} installations of type {name} planned")
+ # [END print_solution]
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
# [END program]
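The reindented appointments.py above enumerates every feasible knapsack through a CP-SAT solution callback; a self-contained sketch of that enumeration pattern is shown here, using toy variables x and y and an illustrative Collector class rather than the example's data:

from ortools.sat.python import cp_model


class Collector(cp_model.CpSolverSolutionCallback):
    """Stores every feasible assignment as a list of variable values."""

    def __init__(self, variables):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self._variables = variables
        self.combinations = []

    def on_solution_callback(self) -> None:
        self.combinations.append([self.value(v) for v in self._variables])


model = cp_model.CpModel()
x = model.new_int_var(0, 3, "x")
y = model.new_int_var(0, 3, "y")
# Keep x + 2*y within [2, 4], mirroring model.add_linear_constraint(load, ...).
model.add_linear_constraint(x + 2 * y, 2, 4)

solver = cp_model.CpSolver()
solver.parameters.enumerate_all_solutions = True
collector = Collector([x, y])
solver.solve(model, collector)
print(collector.combinations)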
diff --git a/examples/python/arc_flow_cutting_stock_sat.py b/examples/python/arc_flow_cutting_stock_sat.py
index 9b70f2c2cac..eb73bcc2795 100644
--- a/examples/python/arc_flow_cutting_stock_sat.py
+++ b/examples/python/arc_flow_cutting_stock_sat.py
@@ -34,7 +34,9 @@
"num_search_workers:8,log_search_progress:true,max_time_in_seconds:10",
"Sat solver parameters.",
)
-_SOLVER = flags.DEFINE_string("solver", "sat", "Method used to solve: sat, mip.")
+_SOLVER = flags.DEFINE_string(
+ "solver", "sat", "Method used to solve: sat, mip."
+)
DESIRED_LENGTHS = [
@@ -184,247 +186,256 @@
def regroup_and_count(raw_input):
- """Regroup all equal capacities in a multiset."""
- grouped = collections.defaultdict(int)
- for i in raw_input:
- grouped[i] += 1
- output = []
- for size, count in grouped.items():
- output.append([size, count])
- output.sort(reverse=False)
- return output
+ """Regroup all equal capacities in a multiset."""
+ grouped = collections.defaultdict(int)
+ for i in raw_input:
+ grouped[i] += 1
+ output = []
+ for size, count in grouped.items():
+ output.append([size, count])
+ output.sort(reverse=False)
+ return output
def price_usage(usage, capacities):
- """Compute the best price for a given usage and possible capacities."""
- price = max(capacities)
- for capacity in capacities:
- if capacity < usage:
- continue
- price = min(capacity - usage, price)
- return price
+ """Compute the best price for a given usage and possible capacities."""
+ price = max(capacities)
+ for capacity in capacities:
+ if capacity < usage:
+ continue
+ price = min(capacity - usage, price)
+ return price
def create_state_graph(items, max_capacity):
- """Create a state graph from a multiset of items, and a maximum capacity."""
- states = []
- state_to_index = {}
- states.append(0)
- state_to_index[0] = 0
- transitions = []
-
- for item_index, size_and_count in enumerate(items):
- size, count = size_and_count
- num_states = len(states)
- for state_index in range(num_states):
- current_state = states[state_index]
- current_state_index = state_index
-
- for card in range(count):
- new_state = current_state + size * (card + 1)
- if new_state > max_capacity:
- break
- if new_state in state_to_index:
- new_state_index = state_to_index[new_state]
- else:
- new_state_index = len(states)
- states.append(new_state)
- state_to_index[new_state] = new_state_index
- # Add the transition
- transitions.append(
- [current_state_index, new_state_index, item_index, card + 1]
- )
-
- return states, transitions
-
-
-def solve_cutting_stock_with_arc_flow_and_sat(output_proto_file: str, params: str):
- """Solve the cutting stock with arc-flow and the CP-SAT solver."""
- items = regroup_and_count(DESIRED_LENGTHS)
- print("Items:", items)
- num_items = len(DESIRED_LENGTHS)
-
- max_capacity = max(POSSIBLE_CAPACITIES)
- states, transitions = create_state_graph(items, max_capacity)
+ """Create a state graph from a multiset of items, and a maximum capacity."""
+ states = []
+ state_to_index = {}
+ states.append(0)
+ state_to_index[0] = 0
+ transitions = []
+
+ for item_index, size_and_count in enumerate(items):
+ size, count = size_and_count
+ num_states = len(states)
+ for state_index in range(num_states):
+ current_state = states[state_index]
+ current_state_index = state_index
+
+ for card in range(count):
+ new_state = current_state + size * (card + 1)
+ if new_state > max_capacity:
+ break
+ if new_state in state_to_index:
+ new_state_index = state_to_index[new_state]
+ else:
+ new_state_index = len(states)
+ states.append(new_state)
+ state_to_index[new_state] = new_state_index
+ # Add the transition
+ transitions.append(
+ [current_state_index, new_state_index, item_index, card + 1]
+ )
- print(
- "Dynamic programming has generated",
- len(states),
- "states and",
- len(transitions),
- "transitions",
+ return states, transitions
+
+
+def solve_cutting_stock_with_arc_flow_and_sat(
+ output_proto_file: str, params: str
+):
+ """Solve the cutting stock with arc-flow and the CP-SAT solver."""
+ items = regroup_and_count(DESIRED_LENGTHS)
+ print("Items:", items)
+ num_items = len(DESIRED_LENGTHS)
+
+ max_capacity = max(POSSIBLE_CAPACITIES)
+ states, transitions = create_state_graph(items, max_capacity)
+
+ print(
+ "Dynamic programming has generated",
+ len(states),
+ "states and",
+ len(transitions),
+ "transitions",
+ )
+
+ incoming_vars = collections.defaultdict(list)
+ outgoing_vars = collections.defaultdict(list)
+ incoming_sink_vars = []
+ item_vars = collections.defaultdict(list)
+ item_coeffs = collections.defaultdict(list)
+ transition_vars = []
+
+ model = cp_model.CpModel()
+
+ objective_vars = []
+ objective_coeffs = []
+
+ for outgoing, incoming, item_index, card in transitions:
+ count = items[item_index][1]
+ max_count = count // card
+ count_var = model.NewIntVar(
+ 0, max_count, "i%i_f%i_t%i_C%s" % (item_index, incoming, outgoing, card)
+ )
+ incoming_vars[incoming].append(count_var)
+ outgoing_vars[outgoing].append(count_var)
+ item_vars[item_index].append(count_var)
+ item_coeffs[item_index].append(card)
+ transition_vars.append(count_var)
+
+ for state_index, state in enumerate(states):
+ if state_index == 0:
+ continue
+ exit_var = model.NewIntVar(0, num_items, "e%i" % state_index)
+ outgoing_vars[state_index].append(exit_var)
+ incoming_sink_vars.append(exit_var)
+ price = price_usage(state, POSSIBLE_CAPACITIES)
+ objective_vars.append(exit_var)
+ objective_coeffs.append(price)
+
+ # Flow conservation
+ for state_index in range(1, len(states)):
+ model.Add(
+ sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index])
)
- incoming_vars = collections.defaultdict(list)
- outgoing_vars = collections.defaultdict(list)
- incoming_sink_vars = []
- item_vars = collections.defaultdict(list)
- item_coeffs = collections.defaultdict(list)
- transition_vars = []
-
- model = cp_model.CpModel()
-
- objective_vars = []
- objective_coeffs = []
+ # Flow going out of the source must go in the sink
+ model.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars))
- for outgoing, incoming, item_index, card in transitions:
- count = items[item_index][1]
- max_count = count // card
- count_var = model.NewIntVar(
- 0, max_count, "i%i_f%i_t%i_C%s" % (item_index, incoming, outgoing, card)
+ # Items must be placed
+ for item_index, size_and_count in enumerate(items):
+ num_arcs = len(item_vars[item_index])
+ model.Add(
+ sum(
+ item_vars[item_index][i] * item_coeffs[item_index][i]
+ for i in range(num_arcs)
)
- incoming_vars[incoming].append(count_var)
- outgoing_vars[outgoing].append(count_var)
- item_vars[item_index].append(count_var)
- item_coeffs[item_index].append(card)
- transition_vars.append(count_var)
-
- for state_index, state in enumerate(states):
- if state_index == 0:
- continue
- exit_var = model.NewIntVar(0, num_items, "e%i" % state_index)
- outgoing_vars[state_index].append(exit_var)
- incoming_sink_vars.append(exit_var)
- price = price_usage(state, POSSIBLE_CAPACITIES)
- objective_vars.append(exit_var)
- objective_coeffs.append(price)
-
- # Flow conservation
- for state_index in range(1, len(states)):
- model.Add(sum(incoming_vars[state_index]) == sum(outgoing_vars[state_index]))
-
- # Flow going out of the source must go in the sink
- model.Add(sum(outgoing_vars[0]) == sum(incoming_sink_vars))
-
- # Items must be placed
- for item_index, size_and_count in enumerate(items):
- num_arcs = len(item_vars[item_index])
- model.Add(
- sum(
- item_vars[item_index][i] * item_coeffs[item_index][i]
- for i in range(num_arcs)
- )
- == size_and_count[1]
- )
-
- # Objective is the sum of waste
- model.Minimize(
- sum(objective_vars[i] * objective_coeffs[i] for i in range(len(objective_vars)))
+ == size_and_count[1]
)
- # Output model proto to file.
- if output_proto_file:
- model.ExportToFile(output_proto_file)
+ # Objective is the sum of waste
+ model.Minimize(
+ sum(
+ objective_vars[i] * objective_coeffs[i]
+ for i in range(len(objective_vars))
+ )
+ )
- # Solve model.
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- solver.parameters.log_search_progress = True
- solver.Solve(model)
+ # Output model proto to file.
+ if output_proto_file:
+ model.ExportToFile(output_proto_file)
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ solver.parameters.log_search_progress = True
+ solver.Solve(model)
-def solve_cutting_stock_with_arc_flow_and_mip():
- """Solve the cutting stock with arc-flow and a MIP solver."""
- items = regroup_and_count(DESIRED_LENGTHS)
- print("Items:", items)
- num_items = len(DESIRED_LENGTHS)
- max_capacity = max(POSSIBLE_CAPACITIES)
- states, transitions = create_state_graph(items, max_capacity)
- print(
- "Dynamic programming has generated",
- len(states),
- "states and",
- len(transitions),
- "transitions",
+def solve_cutting_stock_with_arc_flow_and_mip():
+ """Solve the cutting stock with arc-flow and a MIP solver."""
+ items = regroup_and_count(DESIRED_LENGTHS)
+ print("Items:", items)
+ num_items = len(DESIRED_LENGTHS)
+ max_capacity = max(POSSIBLE_CAPACITIES)
+ states, transitions = create_state_graph(items, max_capacity)
+
+ print(
+ "Dynamic programming has generated",
+ len(states),
+ "states and",
+ len(transitions),
+ "transitions",
+ )
+
+ incoming_vars = collections.defaultdict(list)
+ outgoing_vars = collections.defaultdict(list)
+ incoming_sink_vars = []
+ item_vars = collections.defaultdict(list)
+ item_coeffs = collections.defaultdict(list)
+
+ start_time = time.time()
+ model = mb.ModelBuilder()
+
+ objective_vars = []
+ objective_coeffs = []
+
+ var_index = 0
+ for outgoing, incoming, item_index, card in transitions:
+ count = items[item_index][1]
+ count_var = model.new_int_var(
+ 0,
+ count,
+ "a%i_i%i_f%i_t%i_c%i"
+ % (var_index, item_index, incoming, outgoing, card),
+ )
+ var_index += 1
+ incoming_vars[incoming].append(count_var)
+ outgoing_vars[outgoing].append(count_var)
+ item_vars[item_index].append(count_var)
+ item_coeffs[item_index].append(card)
+
+ for state_index, state in enumerate(states):
+ if state_index == 0:
+ continue
+ exit_var = model.new_int_var(0, num_items, "e%i" % state_index)
+ outgoing_vars[state_index].append(exit_var)
+ incoming_sink_vars.append(exit_var)
+ price = price_usage(state, POSSIBLE_CAPACITIES)
+ objective_vars.append(exit_var)
+ objective_coeffs.append(price)
+
+ # Flow conservation
+ for state_index in range(1, len(states)):
+ model.add(
+ mb.LinearExpr.sum(incoming_vars[state_index])
+ == mb.LinearExpr.sum(outgoing_vars[state_index])
)
- incoming_vars = collections.defaultdict(list)
- outgoing_vars = collections.defaultdict(list)
- incoming_sink_vars = []
- item_vars = collections.defaultdict(list)
- item_coeffs = collections.defaultdict(list)
-
- start_time = time.time()
- model = mb.ModelBuilder()
-
- objective_vars = []
- objective_coeffs = []
-
- var_index = 0
- for outgoing, incoming, item_index, card in transitions:
- count = items[item_index][1]
- count_var = model.new_int_var(
- 0,
- count,
- "a%i_i%i_f%i_t%i_c%i" % (var_index, item_index, incoming, outgoing, card),
- )
- var_index += 1
- incoming_vars[incoming].append(count_var)
- outgoing_vars[outgoing].append(count_var)
- item_vars[item_index].append(count_var)
- item_coeffs[item_index].append(card)
-
- for state_index, state in enumerate(states):
- if state_index == 0:
- continue
- exit_var = model.new_int_var(0, num_items, "e%i" % state_index)
- outgoing_vars[state_index].append(exit_var)
- incoming_sink_vars.append(exit_var)
- price = price_usage(state, POSSIBLE_CAPACITIES)
- objective_vars.append(exit_var)
- objective_coeffs.append(price)
-
- # Flow conservation
- for state_index in range(1, len(states)):
- model.add(
- mb.LinearExpr.sum(incoming_vars[state_index])
- == mb.LinearExpr.sum(outgoing_vars[state_index])
- )
+    # Flow going out of the source must go into the sink
+ model.add(
+ mb.LinearExpr.sum(outgoing_vars[0])
+ == mb.LinearExpr.sum(incoming_sink_vars)
+ )
- # Flow going out of the source must go in the sink
+ # Items must be placed
+ for item_index, size_and_count in enumerate(items):
+ num_arcs = len(item_vars[item_index])
model.add(
- mb.LinearExpr.sum(outgoing_vars[0]) == mb.LinearExpr.sum(incoming_sink_vars)
+ mb.LinearExpr.sum([
+ item_vars[item_index][i] * item_coeffs[item_index][i]
+ for i in range(num_arcs)
+ ])
+ == size_and_count[1]
)
- # Items must be placed
- for item_index, size_and_count in enumerate(items):
- num_arcs = len(item_vars[item_index])
- model.add(
- mb.LinearExpr.sum(
- [
- item_vars[item_index][i] * item_coeffs[item_index][i]
- for i in range(num_arcs)
- ]
- )
- == size_and_count[1]
- )
-
- # Objective is the sum of waste
- model.minimize(np.dot(objective_vars, objective_coeffs))
+ # Objective is the sum of waste
+ model.minimize(np.dot(objective_vars, objective_coeffs))
- solver = mb.ModelSolver("scip")
- solver.enable_output(True)
- status = solver.solve(model)
+ solver = mb.ModelSolver("scip")
+ solver.enable_output(True)
+ status = solver.solve(model)
- ### Output the solution.
- if status == mb.SolveStatus.OPTIMAL or status == mb.SolveStatus.FEASIBLE:
- print(
- "Objective value = %f found in %.2f s"
- % (solver.objective_value, time.time() - start_time)
- )
- else:
- print("No solution")
+ ### Output the solution.
+ if status == mb.SolveStatus.OPTIMAL or status == mb.SolveStatus.FEASIBLE:
+ print(
+ "Objective value = %f found in %.2f s"
+ % (solver.objective_value, time.time() - start_time)
+ )
+ else:
+ print("No solution")
def main(_):
- """Main function."""
- if _SOLVER.value == "sat":
- solve_cutting_stock_with_arc_flow_and_sat(_OUTPUT_PROTO.value, _PARAMS.value)
- else: # 'mip'
- solve_cutting_stock_with_arc_flow_and_mip()
+ """Main function."""
+ if _SOLVER.value == "sat":
+ solve_cutting_stock_with_arc_flow_and_sat(
+ _OUTPUT_PROTO.value, _PARAMS.value
+ )
+ else: # 'mip'
+ solve_cutting_stock_with_arc_flow_and_mip()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
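The arc-flow formulations reformatted above (both the CP-SAT and the ModelBuilder variants) hinge on the same flow-conservation pattern over the dynamic-programming state graph. A minimal standalone sketch of that pattern with CP-SAT follows; the transitions and bounds are made-up toy values, not the example's data.

import collections

from ortools.sat.python import cp_model

# Toy state graph: state 0 is the source; an arc (tail, head, max_count)
# can carry up to max_count units of flow.
transitions = [(0, 1, 5), (1, 2, 5), (0, 2, 5)]
num_states = 3

model = cp_model.CpModel()
incoming = collections.defaultdict(list)
outgoing = collections.defaultdict(list)
for tail, head, max_count in transitions:
    arc = model.new_int_var(0, max_count, f"arc_{tail}_{head}")
    outgoing[tail].append(arc)
    incoming[head].append(arc)

sink_arcs = []
for state in range(1, num_states):
    exit_arc = model.new_int_var(0, 10, f"exit_{state}")
    outgoing[state].append(exit_arc)
    sink_arcs.append(exit_arc)
    # Flow conservation: whatever enters a state must leave it.
    model.add(sum(incoming[state]) == sum(outgoing[state]))

# Flow leaving the source must end up in the sink.
model.add(sum(outgoing[0]) == sum(sink_arcs))

solver = cp_model.CpSolver()
if solver.solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print("feasible flow found")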
diff --git a/examples/python/assignment_with_constraints_sat.py b/examples/python/assignment_with_constraints_sat.py
index f32d6299432..68761917309 100644
--- a/examples/python/assignment_with_constraints_sat.py
+++ b/examples/python/assignment_with_constraints_sat.py
@@ -20,109 +20,118 @@
def solve_assignment():
- """solve the assignment problem."""
- # Data.
- cost = [
- [90, 76, 75, 70, 50, 74],
- [35, 85, 55, 65, 48, 101],
- [125, 95, 90, 105, 59, 120],
- [45, 110, 95, 115, 104, 83],
- [60, 105, 80, 75, 59, 62],
- [45, 65, 110, 95, 47, 31],
- [38, 51, 107, 41, 69, 99],
- [47, 85, 57, 71, 92, 77],
- [39, 63, 97, 49, 118, 56],
- [47, 101, 71, 60, 88, 109],
- [17, 39, 103, 64, 61, 92],
- [101, 45, 83, 59, 92, 27],
- ]
-
- group1 = [
- [0, 0, 1, 1], # Workers 2, 3
- [0, 1, 0, 1], # Workers 1, 3
- [0, 1, 1, 0], # Workers 1, 2
- [1, 1, 0, 0], # Workers 0, 1
- [1, 0, 1, 0], # Workers 0, 2
- ]
-
- group2 = [
- [0, 0, 1, 1], # Workers 6, 7
- [0, 1, 0, 1], # Workers 5, 7
- [0, 1, 1, 0], # Workers 5, 6
- [1, 1, 0, 0], # Workers 4, 5
- [1, 0, 0, 1], # Workers 4, 7
- ]
-
- group3 = [
- [0, 0, 1, 1], # Workers 10, 11
- [0, 1, 0, 1], # Workers 9, 11
- [0, 1, 1, 0], # Workers 9, 10
- [1, 0, 1, 0], # Workers 8, 10
- [1, 0, 0, 1], # Workers 8, 11
- ]
-
- sizes = [10, 7, 3, 12, 15, 4, 11, 5]
- total_size_max = 15
- num_workers = len(cost)
- num_tasks = len(cost[1])
- all_workers = range(num_workers)
- all_tasks = range(num_tasks)
-
- # Model.
-
- model = cp_model.CpModel()
- # Variables
- selected = [
- [model.new_bool_var(f"x[{i},{j}]") for j in all_tasks] for i in all_workers
- ]
- works = [model.new_bool_var(f"works[{i}]") for i in all_workers]
-
- # Constraints
-
- # Link selected and workers.
- for i in range(num_workers):
- model.add_max_equality(works[i], selected[i])
-
- # Each task is assigned to at least one worker.
- for j in all_tasks:
- model.add(sum(selected[i][j] for i in all_workers) >= 1)
-
- # Total task size for each worker is at most total_size_max
- for i in all_workers:
- model.add(sum(sizes[j] * selected[i][j] for j in all_tasks) <= total_size_max)
-
- # Group constraints.
- model.add_allowed_assignments([works[0], works[1], works[2], works[3]], group1)
- model.add_allowed_assignments([works[4], works[5], works[6], works[7]], group2)
- model.add_allowed_assignments([works[8], works[9], works[10], works[11]], group3)
-
- # Objective
- model.minimize(
- sum(selected[i][j] * cost[i][j] for j in all_tasks for i in all_workers)
+ """solve the assignment problem."""
+ # Data.
+ cost = [
+ [90, 76, 75, 70, 50, 74],
+ [35, 85, 55, 65, 48, 101],
+ [125, 95, 90, 105, 59, 120],
+ [45, 110, 95, 115, 104, 83],
+ [60, 105, 80, 75, 59, 62],
+ [45, 65, 110, 95, 47, 31],
+ [38, 51, 107, 41, 69, 99],
+ [47, 85, 57, 71, 92, 77],
+ [39, 63, 97, 49, 118, 56],
+ [47, 101, 71, 60, 88, 109],
+ [17, 39, 103, 64, 61, 92],
+ [101, 45, 83, 59, 92, 27],
+ ]
+
+ group1 = [
+ [0, 0, 1, 1], # Workers 2, 3
+ [0, 1, 0, 1], # Workers 1, 3
+ [0, 1, 1, 0], # Workers 1, 2
+ [1, 1, 0, 0], # Workers 0, 1
+ [1, 0, 1, 0], # Workers 0, 2
+ ]
+
+ group2 = [
+ [0, 0, 1, 1], # Workers 6, 7
+ [0, 1, 0, 1], # Workers 5, 7
+ [0, 1, 1, 0], # Workers 5, 6
+ [1, 1, 0, 0], # Workers 4, 5
+ [1, 0, 0, 1], # Workers 4, 7
+ ]
+
+ group3 = [
+ [0, 0, 1, 1], # Workers 10, 11
+ [0, 1, 0, 1], # Workers 9, 11
+ [0, 1, 1, 0], # Workers 9, 10
+ [1, 0, 1, 0], # Workers 8, 10
+ [1, 0, 0, 1], # Workers 8, 11
+ ]
+
+ sizes = [10, 7, 3, 12, 15, 4, 11, 5]
+ total_size_max = 15
+ num_workers = len(cost)
+ num_tasks = len(cost[1])
+ all_workers = range(num_workers)
+ all_tasks = range(num_tasks)
+
+ # Model.
+
+ model = cp_model.CpModel()
+ # Variables
+ selected = [
+ [model.new_bool_var(f"x[{i},{j}]") for j in all_tasks]
+ for i in all_workers
+ ]
+ works = [model.new_bool_var(f"works[{i}]") for i in all_workers]
+
+ # Constraints
+
+ # Link selected and workers.
+ for i in range(num_workers):
+ model.add_max_equality(works[i], selected[i])
+
+ # Each task is assigned to at least one worker.
+ for j in all_tasks:
+ model.add(sum(selected[i][j] for i in all_workers) >= 1)
+
+ # Total task size for each worker is at most total_size_max
+ for i in all_workers:
+ model.add(
+ sum(sizes[j] * selected[i][j] for j in all_tasks) <= total_size_max
)
- # Solve and output solution.
- solver = cp_model.CpSolver()
- status = solver.solve(model)
-
- if status == cp_model.OPTIMAL:
- print(f"Total cost = {solver.objective_value}")
- print()
- for i in all_workers:
- for j in all_tasks:
- if solver.boolean_value(selected[i][j]):
- print(f"Worker {i} assigned to task {j} with Cost = {cost[i][j]}")
+ # Group constraints.
+ model.add_allowed_assignments(
+ [works[0], works[1], works[2], works[3]], group1
+ )
+ model.add_allowed_assignments(
+ [works[4], works[5], works[6], works[7]], group2
+ )
+ model.add_allowed_assignments(
+ [works[8], works[9], works[10], works[11]], group3
+ )
+
+ # Objective
+ model.minimize(
+ sum(selected[i][j] * cost[i][j] for j in all_tasks for i in all_workers)
+ )
+
+ # Solve and output solution.
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+
+ if status == cp_model.OPTIMAL:
+ print(f"Total cost = {solver.objective_value}")
+ print()
+ for i in all_workers:
+ for j in all_tasks:
+ if solver.boolean_value(selected[i][j]):
+ print(f"Worker {i} assigned to task {j} with Cost = {cost[i][j]}")
- print()
+ print()
- print(solver.response_stats())
+ print(solver.response_stats())
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- solve_assignment()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ solve_assignment()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
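The group constraints in this file are built with add_allowed_assignments over the Boolean works variables. A tiny self-contained sketch of that pattern, using a hypothetical three-worker table rather than the example's groups:

from ortools.sat.python import cp_model

model = cp_model.CpModel()
works = [model.new_bool_var(f"works[{i}]") for i in range(3)]
# Only these tuples of (works[0], works[1], works[2]) are feasible,
# i.e. exactly two of the three workers may be active.
allowed = [(1, 1, 0), (1, 0, 1), (0, 1, 1)]
model.add_allowed_assignments(works, allowed)
model.maximize(sum(works))

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print([solver.boolean_value(w) for w in works])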
diff --git a/examples/python/balance_group_sat.py b/examples/python/balance_group_sat.py
index 5f37f95605f..1c617c7101d 100644
--- a/examples/python/balance_group_sat.py
+++ b/examples/python/balance_group_sat.py
@@ -28,157 +28,162 @@
# Create a solution printer.
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
-
- def __init__(self, values, colors, all_groups, all_items, item_in_group):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
- self.__values = values
- self.__colors = colors
- self.__all_groups = all_groups
- self.__all_items = all_items
- self.__item_in_group = item_in_group
-
- def on_solution_callback(self):
- print(f"Solution {self.__solution_count}")
- self.__solution_count += 1
-
- print(f" objective value = {self.objective_value}")
- groups = {}
- sums = {}
- for g in self.__all_groups:
- groups[g] = []
- sums[g] = 0
- for item in self.__all_items:
- if self.boolean_value(self.__item_in_group[(item, g)]):
- groups[g].append(item)
- sums[g] += self.__values[item]
-
- for g in self.__all_groups:
- group = groups[g]
- print(f"group {g}: sum = {sums[g]:0.2f} [", end="")
- for item in group:
- value = self.__values[item]
- color = self.__colors[item]
- print(f" ({item}, {value}, {color})", end="")
- print("]")
+ """Print intermediate solutions."""
+
+ def __init__(self, values, colors, all_groups, all_items, item_in_group):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
+ self.__values = values
+ self.__colors = colors
+ self.__all_groups = all_groups
+ self.__all_items = all_items
+ self.__item_in_group = item_in_group
+
+ def on_solution_callback(self):
+ print(f"Solution {self.__solution_count}")
+ self.__solution_count += 1
+
+ print(f" objective value = {self.objective_value}")
+ groups = {}
+ sums = {}
+ for g in self.__all_groups:
+ groups[g] = []
+ sums[g] = 0
+ for item in self.__all_items:
+ if self.boolean_value(self.__item_in_group[(item, g)]):
+ groups[g].append(item)
+ sums[g] += self.__values[item]
+
+ for g in self.__all_groups:
+ group = groups[g]
+ print(f"group {g}: sum = {sums[g]:0.2f} [", end="")
+ for item in group:
+ value = self.__values[item]
+ color = self.__colors[item]
+ print(f" ({item}, {value}, {color})", end="")
+ print("]")
def main(argv: Sequence[str]) -> None:
- """Solves a group balancing problem."""
-
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- # Data.
- num_groups = 10
- num_items = 100
- num_colors = 3
- min_items_of_same_color_per_group = 4
-
- all_groups = range(num_groups)
- all_items = range(num_items)
- all_colors = range(num_colors)
-
- # values for each items.
- values = [1 + i + (i * i // 200) for i in all_items]
- # Color for each item (simple modulo).
- colors = [i % num_colors for i in all_items]
-
- sum_of_values = sum(values)
- average_sum_per_group = sum_of_values // num_groups
-
- num_items_per_group = num_items // num_groups
-
- # Collect all items in a given color.
- items_per_color: Dict[int, list[int]] = {}
- for color in all_colors:
- items_per_color[color] = []
- for i in all_items:
- if colors[i] == color:
- items_per_color[color].append(i)
-
- print(
- f"Model has {num_items} items, {num_groups} groups, and" f" {num_colors} colors"
- )
- print(f" average sum per group = {average_sum_per_group}")
+ """Solves a group balancing problem."""
+
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ # Data.
+ num_groups = 10
+ num_items = 100
+ num_colors = 3
+ min_items_of_same_color_per_group = 4
+
+ all_groups = range(num_groups)
+ all_items = range(num_items)
+ all_colors = range(num_colors)
+
+    # Values for each item.
+ values = [1 + i + (i * i // 200) for i in all_items]
+ # Color for each item (simple modulo).
+ colors = [i % num_colors for i in all_items]
+
+ sum_of_values = sum(values)
+ average_sum_per_group = sum_of_values // num_groups
+
+ num_items_per_group = num_items // num_groups
+
+ # Collect all items in a given color.
+ items_per_color: Dict[int, list[int]] = {}
+ for color in all_colors:
+ items_per_color[color] = []
+ for i in all_items:
+ if colors[i] == color:
+ items_per_color[color].append(i)
- # Model.
+ print(
+ f"Model has {num_items} items, {num_groups} groups, and"
+ f" {num_colors} colors"
+ )
+ print(f" average sum per group = {average_sum_per_group}")
- model = cp_model.CpModel()
+ # Model.
- item_in_group = {}
- for i in all_items:
- for g in all_groups:
- item_in_group[(i, g)] = model.new_bool_var(f"item {i} in group {g}")
+ model = cp_model.CpModel()
- # Each group must have the same size.
+ item_in_group = {}
+ for i in all_items:
for g in all_groups:
- model.add(sum(item_in_group[(i, g)] for i in all_items) == num_items_per_group)
+ item_in_group[(i, g)] = model.new_bool_var(f"item {i} in group {g}")
- # One item must belong to exactly one group.
- for i in all_items:
- model.add(sum(item_in_group[(i, g)] for g in all_groups) == 1)
+ # Each group must have the same size.
+ for g in all_groups:
+ model.add(
+ sum(item_in_group[(i, g)] for i in all_items) == num_items_per_group
+ )
+
+ # One item must belong to exactly one group.
+ for i in all_items:
+ model.add(sum(item_in_group[(i, g)] for g in all_groups) == 1)
+
+    # The deviation of the sum of items in each group from the average.
+ e = model.new_int_var(0, 550, "epsilon")
+
+ # Constrain the sum of values in one group around the average sum per group.
+ for g in all_groups:
+ model.add(
+ sum(item_in_group[(i, g)] * values[i] for i in all_items)
+ <= average_sum_per_group + e
+ )
+ model.add(
+ sum(item_in_group[(i, g)] * values[i] for i in all_items)
+ >= average_sum_per_group - e
+ )
- # The deviation of the sum of each items in a group against the average.
- e = model.new_int_var(0, 550, "epsilon")
+ # color_in_group variables.
+ color_in_group = {}
+ for g in all_groups:
+ for c in all_colors:
+ color_in_group[(c, g)] = model.new_bool_var(f"color {c} is in group {g}")
- # Constrain the sum of values in one group around the average sum per group.
+ # Item is in a group implies its color is in that group.
+ for i in all_items:
for g in all_groups:
- model.add(
- sum(item_in_group[(i, g)] * values[i] for i in all_items)
- <= average_sum_per_group + e
- )
- model.add(
- sum(item_in_group[(i, g)] * values[i] for i in all_items)
- >= average_sum_per_group - e
- )
-
- # color_in_group variables.
- color_in_group = {}
+ model.add_implication(
+ item_in_group[(i, g)], color_in_group[(colors[i], g)]
+ )
+
+    # If a color is in a group, the group must contain at least
+    # min_items_of_same_color_per_group items of that color.
+ for c in all_colors:
for g in all_groups:
- for c in all_colors:
- color_in_group[(c, g)] = model.new_bool_var(f"color {c} is in group {g}")
+ literal = color_in_group[(c, g)]
+ model.add(
+ sum(item_in_group[(i, g)] for i in items_per_color[c])
+ >= min_items_of_same_color_per_group
+ ).only_enforce_if(literal)
- # Item is in a group implies its color is in that group.
- for i in all_items:
- for g in all_groups:
- model.add_implication(item_in_group[(i, g)], color_in_group[(colors[i], g)])
+ # Compute the maximum number of colors in a group.
+ max_color = num_items_per_group // min_items_of_same_color_per_group
- # If a color is in a group, it must contains at least
- # min_items_of_same_color_per_group items from that color.
- for c in all_colors:
- for g in all_groups:
- literal = color_in_group[(c, g)]
- model.add(
- sum(item_in_group[(i, g)] for i in items_per_color[c])
- >= min_items_of_same_color_per_group
- ).only_enforce_if(literal)
-
- # Compute the maximum number of colors in a group.
- max_color = num_items_per_group // min_items_of_same_color_per_group
-
- # Redundant constraint, it helps with solving time.
- if max_color < num_colors:
- for g in all_groups:
- model.add(sum(color_in_group[(c, g)] for c in all_colors) <= max_color)
-
- # minimize epsilon
- model.minimize(e)
-
- solver = cp_model.CpSolver()
- # solver.parameters.log_search_progress = True
- solver.parameters.num_workers = 16
- solution_printer = SolutionPrinter(
- values, colors, all_groups, all_items, item_in_group
- )
- status = solver.solve(model, solution_printer)
+    # Redundant constraint; it helps with solving time.
+ if max_color < num_colors:
+ for g in all_groups:
+ model.add(sum(color_in_group[(c, g)] for c in all_colors) <= max_color)
+
+ # minimize epsilon
+ model.minimize(e)
+
+ solver = cp_model.CpSolver()
+ # solver.parameters.log_search_progress = True
+ solver.parameters.num_workers = 16
+ solution_printer = SolutionPrinter(
+ values, colors, all_groups, all_items, item_in_group
+ )
+ status = solver.solve(model, solution_printer)
- if status == cp_model.OPTIMAL:
- print(f"Optimal epsilon: {solver.objective_value}")
- print(solver.response_stats())
- else:
- print("No solution found")
+ if status == cp_model.OPTIMAL:
+ print(f"Optimal epsilon: {solver.objective_value}")
+ print(solver.response_stats())
+ else:
+ print("No solution found")
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
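The balancing trick in balance_group_sat.py is to bound every group sum within [average - epsilon, average + epsilon] and then minimize epsilon. A compact sketch of that pattern with toy values (names and data are illustrative only):

from ortools.sat.python import cp_model

values = [3, 8, 5, 7, 2, 9]  # toy item values
num_groups = 2
average = sum(values) // num_groups

model = cp_model.CpModel()
x = {}  # x[i, g] == 1 iff item i is assigned to group g.
for i in range(len(values)):
    for g in range(num_groups):
        x[i, g] = model.new_bool_var(f"x_{i}_{g}")
    model.add_exactly_one(x[i, g] for g in range(num_groups))

epsilon = model.new_int_var(0, sum(values), "epsilon")
for g in range(num_groups):
    group_sum = sum(values[i] * x[i, g] for i in range(len(values)))
    model.add(group_sum <= average + epsilon)
    model.add(group_sum >= average - epsilon)
model.minimize(epsilon)

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print("epsilon =", solver.value(epsilon))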
diff --git a/examples/python/bus_driver_scheduling_flow_sat.py b/examples/python/bus_driver_scheduling_flow_sat.py
index 275a8b41310..aa8368d2f52 100644
--- a/examples/python/bus_driver_scheduling_flow_sat.py
+++ b/examples/python/bus_driver_scheduling_flow_sat.py
@@ -29,11 +29,13 @@
from ortools.sat.python import cp_model
PARSER = argparse.ArgumentParser()
-PARSER.add_argument("--instance", default=1, type=int, help="Instance number (1..3).")
+PARSER.add_argument(
+ "--instance", default=1, type=int, help="Instance number (1..3)."
+)
PARSER.add_argument(
"--output_proto_file",
default="",
- help="Output file to write the cp_model" "proto to.",
+ help="Output file to write the cp_modelproto to.",
)
PARSER.add_argument("--params", default="", help="Sat solver parameters.")
@@ -1663,165 +1665,169 @@
def find_minimum_number_of_drivers(shifts, params):
- """Minimize the number of needed drivers."""
+ """Minimize the number of needed drivers."""
- num_shifts = len(shifts)
+ num_shifts = len(shifts)
- # All durations are in minutes.
- max_driving_time = 540 # 8 hours.
- max_driving_time_without_pauses = 240 # 4 hours
- min_pause_after_4h = 30
- min_delay_between_shifts = 2
- max_working_time = 720
- min_working_time = 390 # 6.5 hours
- extra_time = 10 + 25
- max_break = 180
+ # All durations are in minutes.
+ max_driving_time = 540 # 8 hours.
+ max_driving_time_without_pauses = 240 # 4 hours
+ min_pause_after_4h = 30
+ min_delay_between_shifts = 2
+ max_working_time = 720
+ min_working_time = 390 # 6.5 hours
+ extra_time = 10 + 25
+ max_break = 180
- # Computed data.
- total_driving_time = sum(shift[5] for shift in shifts)
- min_num_drivers = int(math.ceil(total_driving_time * 1.0 / max_driving_time))
- min_start_time = min(shift[3] for shift in shifts)
- max_end_time = max(shift[4] for shift in shifts)
+ # Computed data.
+ total_driving_time = sum(shift[5] for shift in shifts)
+ min_num_drivers = int(math.ceil(total_driving_time * 1.0 / max_driving_time))
+ min_start_time = min(shift[3] for shift in shifts)
+ max_end_time = max(shift[4] for shift in shifts)
- print("Bus driver scheduling")
- print(" num shifts =", num_shifts)
- print(" total driving time =", total_driving_time, "minutes")
- print(" min num drivers =", min_num_drivers)
- print(" min start time =", min_start_time)
- print(" max end time =", max_end_time)
+ print("Bus driver scheduling")
+ print(" num shifts =", num_shifts)
+ print(" total driving time =", total_driving_time, "minutes")
+ print(" min num drivers =", min_num_drivers)
+ print(" min start time =", min_start_time)
+ print(" max end time =", max_end_time)
- # We are going to build a flow from a the start of the day to the end
- # of the day.
- #
- # Along the path, we will accumulate driving time, accrued time since the
- # last break, and total working time.
+    # We are going to build a flow from the start of the day to the end
+ # of the day.
+ #
+ # Along the path, we will accumulate driving time, accrued time since the
+ # last break, and total working time.
- model = cp_model.CpModel()
+ model = cp_model.CpModel()
- # Per node info
- driving_time = {}
- working_time = {}
- no_break_driving_time = {}
+ # Per node info
+ driving_time = {}
+ working_time = {}
+ no_break_driving_time = {}
- incoming_literals = collections.defaultdict(list)
- outgoing_literals = collections.defaultdict(list)
- outgoing_source_literals = []
- incoming_sink_literals = []
+ incoming_literals = collections.defaultdict(list)
+ outgoing_literals = collections.defaultdict(list)
+ outgoing_source_literals = []
+ incoming_sink_literals = []
- all_literals = []
+ all_literals = []
- # Create all the shift variables before iterating on the transitions
- # between these shifts.
- for shift in range(num_shifts):
- driving_time[shift] = model.NewIntVar(0, max_driving_time, "dt_%i" % shift)
- no_break_driving_time[shift] = model.NewIntVar(
- 0, max_driving_time_without_pauses, "nbdt_%i" % shift
- )
- working_time[shift] = model.NewIntVar(0, max_working_time, "wt_%i" % shift)
+ # Create all the shift variables before iterating on the transitions
+ # between these shifts.
+ for shift in range(num_shifts):
+ driving_time[shift] = model.NewIntVar(0, max_driving_time, "dt_%i" % shift)
+ no_break_driving_time[shift] = model.NewIntVar(
+ 0, max_driving_time_without_pauses, "nbdt_%i" % shift
+ )
+ working_time[shift] = model.NewIntVar(0, max_working_time, "wt_%i" % shift)
- for shift in range(num_shifts):
- duration = shifts[shift][5]
+ for shift in range(num_shifts):
+ duration = shifts[shift][5]
- # Arc from source to shift.
- # - set the working time of the driver
- # - increase driving time and driving time since the last break
- source_lit = model.NewBoolVar("from source to %i" % shift)
- all_literals.append(source_lit)
- outgoing_source_literals.append(source_lit)
- incoming_literals[shift].append(source_lit)
- model.Add(driving_time[shift] == duration).OnlyEnforceIf(source_lit)
- model.Add(no_break_driving_time[shift] == duration).OnlyEnforceIf(source_lit)
- model.Add(working_time[shift] == duration + extra_time).OnlyEnforceIf(
- source_lit
- )
+ # Arc from source to shift.
+ # - set the working time of the driver
+ # - increase driving time and driving time since the last break
+ source_lit = model.NewBoolVar("from source to %i" % shift)
+ all_literals.append(source_lit)
+ outgoing_source_literals.append(source_lit)
+ incoming_literals[shift].append(source_lit)
+ model.Add(driving_time[shift] == duration).OnlyEnforceIf(source_lit)
+ model.Add(no_break_driving_time[shift] == duration).OnlyEnforceIf(
+ source_lit
+ )
+ model.Add(working_time[shift] == duration + extra_time).OnlyEnforceIf(
+ source_lit
+ )
- # Arc from shift to sink
- # - checks that working time is greater than min_working_time
- sink_lit = model.NewBoolVar("from %i to sink" % shift)
- all_literals.append(sink_lit)
- outgoing_literals[shift].append(sink_lit)
- incoming_sink_literals.append(sink_lit)
- model.Add(working_time[shift] >= min_working_time).OnlyEnforceIf(sink_lit)
+ # Arc from shift to sink
+ # - checks that working time is greater than min_working_time
+ sink_lit = model.NewBoolVar("from %i to sink" % shift)
+ all_literals.append(sink_lit)
+ outgoing_literals[shift].append(sink_lit)
+ incoming_sink_literals.append(sink_lit)
+ model.Add(working_time[shift] >= min_working_time).OnlyEnforceIf(sink_lit)
- for other in range(num_shifts):
- delay = shifts[other][3] - shifts[shift][4]
- if delay < min_delay_between_shifts:
- continue
- if delay > max_break:
- break # Assumes start times are sorted.
- other_duration = shifts[other][5]
- lit = model.NewBoolVar("from %i to %i" % (shift, other))
- all_literals.append(lit)
+ for other in range(num_shifts):
+ delay = shifts[other][3] - shifts[shift][4]
+ if delay < min_delay_between_shifts:
+ continue
+ if delay > max_break:
+ break # Assumes start times are sorted.
+ other_duration = shifts[other][5]
+ lit = model.NewBoolVar("from %i to %i" % (shift, other))
+ all_literals.append(lit)
- # Increase driving time
- model.Add(
- driving_time[other] == driving_time[shift] + other_duration
- ).OnlyEnforceIf(lit)
+ # Increase driving time
+ model.Add(
+ driving_time[other] == driving_time[shift] + other_duration
+ ).OnlyEnforceIf(lit)
- # Increase no_break_driving or reset it to 0 depending on the delay
- if delay >= min_pause_after_4h:
- model.Add(no_break_driving_time[other] == other_duration).OnlyEnforceIf(
- lit
- )
- else:
- model.Add(
- no_break_driving_time[other]
- == no_break_driving_time[shift] + other_duration
- ).OnlyEnforceIf(lit)
+ # Increase no_break_driving or reset it to 0 depending on the delay
+ if delay >= min_pause_after_4h:
+ model.Add(no_break_driving_time[other] == other_duration).OnlyEnforceIf(
+ lit
+ )
+ else:
+ model.Add(
+ no_break_driving_time[other]
+ == no_break_driving_time[shift] + other_duration
+ ).OnlyEnforceIf(lit)
- # Increase working time
- model.Add(
- working_time[other] == working_time[shift] + delay + other_duration
- ).OnlyEnforceIf(lit)
+ # Increase working time
+ model.Add(
+ working_time[other] == working_time[shift] + delay + other_duration
+ ).OnlyEnforceIf(lit)
- # Add arc
- outgoing_literals[shift].append(lit)
- incoming_literals[other].append(lit)
+ # Add arc
+ outgoing_literals[shift].append(lit)
+ incoming_literals[other].append(lit)
- # Create dag constraint.
- for shift in range(num_shifts):
- model.Add(sum(outgoing_literals[shift]) == 1)
- model.Add(sum(incoming_literals[shift]) == 1)
+ # Create dag constraint.
+ for shift in range(num_shifts):
+ model.Add(sum(outgoing_literals[shift]) == 1)
+ model.Add(sum(incoming_literals[shift]) == 1)
- # Num drivers
- num_drivers = model.NewIntVar(min_num_drivers, min_num_drivers * 3, "num_drivers")
- model.Add(sum(incoming_sink_literals) == num_drivers)
- model.Add(sum(outgoing_source_literals) == num_drivers)
+ # Num drivers
+ num_drivers = model.NewIntVar(
+ min_num_drivers, min_num_drivers * 3, "num_drivers"
+ )
+ model.Add(sum(incoming_sink_literals) == num_drivers)
+ model.Add(sum(outgoing_source_literals) == num_drivers)
- model.Minimize(num_drivers)
+ model.Minimize(num_drivers)
- # Solve model.
- solver = cp_model.CpSolver()
- solver.parameters.log_search_progress = True
- # solver.parameters.num_search_workers = 16
- # solver.parameters.boolean_encoding_level = 0
- # solver.parameters.lns_focus_on_decision_variables = True
- status = solver.Solve(model)
+ # Solve model.
+ solver = cp_model.CpSolver()
+ solver.parameters.log_search_progress = True
+ # solver.parameters.num_search_workers = 16
+ # solver.parameters.boolean_encoding_level = 0
+ # solver.parameters.lns_focus_on_decision_variables = True
+ status = solver.Solve(model)
- if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
- return -1
+ if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
+ return -1
- # Display solution
- optimal_num_drivers = int(solver.ObjectiveValue())
- print("minimal number of drivers =", optimal_num_drivers)
- return optimal_num_drivers
+ # Display solution
+ optimal_num_drivers = int(solver.ObjectiveValue())
+ print("minimal number of drivers =", optimal_num_drivers)
+ return optimal_num_drivers
def main(args):
- """Optimize the bus driver allocation in two passes."""
- print("----------- first pass: minimize the number of drivers")
- shifts = []
- if args.instance == 1:
- shifts = SAMPLE_SHIFTS_SMALL
- elif args.instance == 2:
- shifts = SAMPLE_SHIFTS_MEDIUM
- elif args.instance == 3:
- shifts = SAMPLE_SHIFTS_LARGE
- num_drivers = find_minimum_number_of_drivers(shifts, args.params)
+ """Optimize the bus driver allocation in two passes."""
+ print("----------- first pass: minimize the number of drivers")
+ shifts = []
+ if args.instance == 1:
+ shifts = SAMPLE_SHIFTS_SMALL
+ elif args.instance == 2:
+ shifts = SAMPLE_SHIFTS_MEDIUM
+ elif args.instance == 3:
+ shifts = SAMPLE_SHIFTS_LARGE
+ num_drivers = find_minimum_number_of_drivers(shifts, args.params)
- print("----------- second pass: minimize the sum of working times")
- # bus_driver_scheduling(False, num_drivers)
+ print("----------- second pass: minimize the sum of working times")
+ # bus_driver_scheduling(False, num_drivers)
if __name__ == "__main__":
- main(PARSER.parse_args())
+ main(PARSER.parse_args())
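The flow model in bus_driver_scheduling_flow_sat.py threads accumulated driving time along Boolean arc literals with OnlyEnforceIf. A minimal sketch of that accumulation step, with hypothetical shift durations:

from ortools.sat.python import cp_model

durations = [120, 90]  # toy shift durations in minutes
model = cp_model.CpModel()
driving = [model.new_int_var(0, 540, f"dt_{s}") for s in range(len(durations))]
arc_0_1 = model.new_bool_var("arc_0_1")

# If the arc from shift 0 to shift 1 is taken, shift 1 accumulates the
# driving time of shift 0 plus its own duration.
model.add(driving[1] == driving[0] + durations[1]).only_enforce_if(arc_0_1)

model.add(driving[0] == durations[0])
model.add(arc_0_1 == 1)  # force the arc, just for this demo

solver = cp_model.CpSolver()
if solver.solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    print(solver.value(driving[1]))  # 210 with these toy numbers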
diff --git a/examples/python/bus_driver_scheduling_sat.py b/examples/python/bus_driver_scheduling_sat.py
index 0a957febd2e..6423fe06ffc 100644
--- a/examples/python/bus_driver_scheduling_sat.py
+++ b/examples/python/bus_driver_scheduling_sat.py
@@ -1709,327 +1709,339 @@
def bus_driver_scheduling(minimize_drivers: bool, max_num_drivers: int) -> int:
- """Optimize the bus driver scheduling problem.
+ """Optimize the bus driver scheduling problem.
- This model has two modes.
+ This model has two modes.
- If minimize_drivers == True, the objective will be to find the minimal
- number of drivers, independently of the working times of each drivers.
+ If minimize_drivers == True, the objective will be to find the minimal
+    number of drivers, independently of the working times of each driver.
- Otherwise, will will create max_num_drivers non optional drivers, and
- minimize the sum of working times of these drivers.
+    Otherwise, we will create max_num_drivers non-optional drivers, and
+ minimize the sum of working times of these drivers.
- Args:
- minimize_drivers: A Boolean parameter specifying the objective of the
- problem. If True, it tries to minimize the number of used drivers. If
- false, it minimizes the sum of working times per workers.
- max_num_drivers: This number specifies the exact number of non optional
- drivers to use. This is only used if 'minimize_drivers' is False.
+ Args:
+ minimize_drivers: A Boolean parameter specifying the objective of the
+ problem. If True, it tries to minimize the number of used drivers. If
+      False, it minimizes the sum of working times per worker.
+    max_num_drivers: This number specifies the exact number of non-optional
+ drivers to use. This is only used if 'minimize_drivers' is False.
- Returns:
- The objective value of the model.
- """
- shifts = None
- if _INSTANCE.value == 0:
- shifts = SAMPLE_SHIFTS_TINY
- elif _INSTANCE.value == 1:
- shifts = SAMPLE_SHIFTS_SMALL
- elif _INSTANCE.value == 2:
- shifts = SAMPLE_SHIFTS_MEDIUM
- elif _INSTANCE.value == 3:
- shifts = SAMPLE_SHIFTS_LARGE
+ Returns:
+ The objective value of the model.
+ """
+ shifts = None
+ if _INSTANCE.value == 0:
+ shifts = SAMPLE_SHIFTS_TINY
+ elif _INSTANCE.value == 1:
+ shifts = SAMPLE_SHIFTS_SMALL
+ elif _INSTANCE.value == 2:
+ shifts = SAMPLE_SHIFTS_MEDIUM
+ elif _INSTANCE.value == 3:
+ shifts = SAMPLE_SHIFTS_LARGE
- num_shifts = len(shifts)
+ num_shifts = len(shifts)
- # All durations are in minutes.
- max_driving_time = 540 # 8 hours.
- max_driving_time_without_pauses = 240 # 4 hours
- min_pause_after_4h = 30
- min_delay_between_shifts = 2
- max_working_time = 720
- min_working_time = 390 # 6.5 hours
- setup_time = 10
- cleanup_time = 15
+ # All durations are in minutes.
+ max_driving_time = 540 # 8 hours.
+ max_driving_time_without_pauses = 240 # 4 hours
+ min_pause_after_4h = 30
+ min_delay_between_shifts = 2
+ max_working_time = 720
+ min_working_time = 390 # 6.5 hours
+ setup_time = 10
+ cleanup_time = 15
- # Computed data.
- total_driving_time = sum(shift[5] for shift in shifts)
- min_num_drivers = int(math.ceil(total_driving_time * 1.0 / max_driving_time))
- num_drivers = 2 * min_num_drivers if minimize_drivers else max_num_drivers
- min_start_time = min(shift[3] for shift in shifts)
- max_end_time = max(shift[4] for shift in shifts)
+ # Computed data.
+ total_driving_time = sum(shift[5] for shift in shifts)
+ min_num_drivers = int(math.ceil(total_driving_time * 1.0 / max_driving_time))
+ num_drivers = 2 * min_num_drivers if minimize_drivers else max_num_drivers
+ min_start_time = min(shift[3] for shift in shifts)
+ max_end_time = max(shift[4] for shift in shifts)
- print("Bus driver scheduling")
- print(" num shifts =", num_shifts)
- print(" total driving time =", total_driving_time, "minutes")
- print(" min num drivers =", min_num_drivers)
- print(" num drivers =", num_drivers)
- print(" min start time =", min_start_time)
- print(" max end time =", max_end_time)
+ print("Bus driver scheduling")
+ print(" num shifts =", num_shifts)
+ print(" total driving time =", total_driving_time, "minutes")
+ print(" min num drivers =", min_num_drivers)
+ print(" num drivers =", num_drivers)
+ print(" min start time =", min_start_time)
+ print(" max end time =", max_end_time)
- model = cp_model.CpModel()
+ model = cp_model.CpModel()
- # For each driver and each shift, we store:
- # - the total driving time including this shift
- # - the acrued driving time since the last 30 minute break
- # Special arcs have the following effect:
- # - 'from source to shift' sets the starting time and accumulate the first
- # shift
- # - 'from shift to end' sets the ending time, and fill the driving_times
- # variable
- # Arcs between two shifts have the following impact
- # - add the duration of the shift to the total driving time
- # - reset the accumulated driving time if the distance between the two
- # shifts is more than 30 minutes, add the duration of the shift to the
- # accumulated driving time since the last break otherwise
+ # For each driver and each shift, we store:
+ # - the total driving time including this shift
+    # - the accrued driving time since the last 30-minute break
+ # Special arcs have the following effect:
+ # - 'from source to shift' sets the starting time and accumulate the first
+ # shift
+ # - 'from shift to end' sets the ending time, and fill the driving_times
+ # variable
+ # Arcs between two shifts have the following impact
+ # - add the duration of the shift to the total driving time
+ # - reset the accumulated driving time if the distance between the two
+ # shifts is more than 30 minutes, add the duration of the shift to the
+ # accumulated driving time since the last break otherwise
- # Per (driver, node) info (driving time, performed,
- # driving time since break)
- total_driving = {}
- no_break_driving = {}
- performed = {}
- starting_shifts = {}
+ # Per (driver, node) info (driving time, performed,
+ # driving time since break)
+ total_driving = {}
+ no_break_driving = {}
+ performed = {}
+ starting_shifts = {}
- # Per driver info (start, end, driving times, is working)
- start_times = []
- end_times = []
- driving_times = []
- working_drivers = []
- working_times = []
+ # Per driver info (start, end, driving times, is working)
+ start_times = []
+ end_times = []
+ driving_times = []
+ working_drivers = []
+ working_times = []
- # Weighted objective
- delay_literals = []
- delay_weights = []
+ # Weighted objective
+ delay_literals = []
+ delay_weights = []
- # Used to propagate more between drivers
- shared_incoming_literals = collections.defaultdict(list)
- shared_outgoing_literals = collections.defaultdict(list)
+ # Used to propagate more between drivers
+ shared_incoming_literals = collections.defaultdict(list)
+ shared_outgoing_literals = collections.defaultdict(list)
- for d in range(num_drivers):
- start_times.append(
- model.new_int_var(min_start_time - setup_time, max_end_time, "start_%i" % d)
+ for d in range(num_drivers):
+ start_times.append(
+ model.new_int_var(
+ min_start_time - setup_time, max_end_time, "start_%i" % d
)
- end_times.append(
- model.new_int_var(min_start_time, max_end_time + cleanup_time, "end_%i" % d)
+ )
+ end_times.append(
+ model.new_int_var(
+ min_start_time, max_end_time + cleanup_time, "end_%i" % d
)
- driving_times.append(model.new_int_var(0, max_driving_time, "driving_%i" % d))
- working_times.append(
- model.new_int_var(0, max_working_time, "working_times_%i" % d)
- )
-
- incoming_literals = collections.defaultdict(list)
- outgoing_literals = collections.defaultdict(list)
- outgoing_source_literals = []
- incoming_sink_literals = []
+ )
+ driving_times.append(
+ model.new_int_var(0, max_driving_time, "driving_%i" % d)
+ )
+ working_times.append(
+ model.new_int_var(0, max_working_time, "working_times_%i" % d)
+ )
- # Create all the shift variables before iterating on the transitions
- # between these shifts.
- for s in range(num_shifts):
- total_driving[d, s] = model.new_int_var(
- 0, max_driving_time, "dr_%i_%i" % (d, s)
- )
- no_break_driving[d, s] = model.new_int_var(
- 0, max_driving_time_without_pauses, "mdr_%i_%i" % (d, s)
- )
- performed[d, s] = model.new_bool_var("performed_%i_%i" % (d, s))
+ incoming_literals = collections.defaultdict(list)
+ outgoing_literals = collections.defaultdict(list)
+ outgoing_source_literals = []
+ incoming_sink_literals = []
- for s in range(num_shifts):
- shift = shifts[s]
- duration = shift[5]
+ # Create all the shift variables before iterating on the transitions
+ # between these shifts.
+ for s in range(num_shifts):
+ total_driving[d, s] = model.new_int_var(
+ 0, max_driving_time, "dr_%i_%i" % (d, s)
+ )
+ no_break_driving[d, s] = model.new_int_var(
+ 0, max_driving_time_without_pauses, "mdr_%i_%i" % (d, s)
+ )
+ performed[d, s] = model.new_bool_var("performed_%i_%i" % (d, s))
- # Arc from source to shift.
- # - set the start time of the driver
- # - increase driving time and driving time since break
- source_lit = model.new_bool_var("%i from source to %i" % (d, s))
- outgoing_source_literals.append(source_lit)
- incoming_literals[s].append(source_lit)
- shared_incoming_literals[s].append(source_lit)
- model.add(start_times[d] == shift[3] - setup_time).only_enforce_if(
- source_lit
- )
- model.add(total_driving[d, s] == duration).only_enforce_if(source_lit)
- model.add(no_break_driving[d, s] == duration).only_enforce_if(source_lit)
- starting_shifts[d, s] = source_lit
+ for s in range(num_shifts):
+ shift = shifts[s]
+ duration = shift[5]
- # Arc from shift to sink
- # - set the end time of the driver
- # - set the driving times of the driver
- sink_lit = model.new_bool_var("%i from %i to sink" % (d, s))
- outgoing_literals[s].append(sink_lit)
- shared_outgoing_literals[s].append(sink_lit)
- incoming_sink_literals.append(sink_lit)
- model.add(end_times[d] == shift[4] + cleanup_time).only_enforce_if(sink_lit)
- model.add(driving_times[d] == total_driving[d, s]).only_enforce_if(sink_lit)
+ # Arc from source to shift.
+ # - set the start time of the driver
+ # - increase driving time and driving time since break
+ source_lit = model.new_bool_var("%i from source to %i" % (d, s))
+ outgoing_source_literals.append(source_lit)
+ incoming_literals[s].append(source_lit)
+ shared_incoming_literals[s].append(source_lit)
+ model.add(start_times[d] == shift[3] - setup_time).only_enforce_if(
+ source_lit
+ )
+ model.add(total_driving[d, s] == duration).only_enforce_if(source_lit)
+ model.add(no_break_driving[d, s] == duration).only_enforce_if(source_lit)
+ starting_shifts[d, s] = source_lit
- # Node not performed
- # - set both driving times to 0
- # - add a looping arc on the node
- model.add(total_driving[d, s] == 0).only_enforce_if(~performed[d, s])
- model.add(no_break_driving[d, s] == 0).only_enforce_if(~performed[d, s])
- incoming_literals[s].append(~performed[d, s])
- outgoing_literals[s].append(~performed[d, s])
- # negated adding to the shared lists, because, globally, each node will
- # have one incoming literal, and one outgoing literal.
+ # Arc from shift to sink
+ # - set the end time of the driver
+ # - set the driving times of the driver
+ sink_lit = model.new_bool_var("%i from %i to sink" % (d, s))
+ outgoing_literals[s].append(sink_lit)
+ shared_outgoing_literals[s].append(sink_lit)
+ incoming_sink_literals.append(sink_lit)
+ model.add(end_times[d] == shift[4] + cleanup_time).only_enforce_if(
+ sink_lit
+ )
+ model.add(driving_times[d] == total_driving[d, s]).only_enforce_if(
+ sink_lit
+ )
- # Node performed:
- # - add upper bound on start_time
- # - add lower bound on end_times
- model.add(start_times[d] <= shift[3] - setup_time).only_enforce_if(
- performed[d, s]
- )
- model.add(end_times[d] >= shift[4] + cleanup_time).only_enforce_if(
- performed[d, s]
- )
+ # Node not performed
+ # - set both driving times to 0
+ # - add a looping arc on the node
+ model.add(total_driving[d, s] == 0).only_enforce_if(~performed[d, s])
+ model.add(no_break_driving[d, s] == 0).only_enforce_if(~performed[d, s])
+ incoming_literals[s].append(~performed[d, s])
+ outgoing_literals[s].append(~performed[d, s])
+            # The negated literals are not added to the shared lists because,
+            # globally, each node must have exactly one incoming and one
- for o in range(num_shifts):
- other = shifts[o]
- delay = other[3] - shift[4]
- if delay < min_delay_between_shifts:
- continue
- lit = model.new_bool_var("%i from %i to %i" % (d, s, o))
+ # Node performed:
+ # - add upper bound on start_time
+ # - add lower bound on end_times
+ model.add(start_times[d] <= shift[3] - setup_time).only_enforce_if(
+ performed[d, s]
+ )
+ model.add(end_times[d] >= shift[4] + cleanup_time).only_enforce_if(
+ performed[d, s]
+ )
- # Increase driving time
- model.add(
- total_driving[d, o] == total_driving[d, s] + other[5]
- ).only_enforce_if(lit)
+ for o in range(num_shifts):
+ other = shifts[o]
+ delay = other[3] - shift[4]
+ if delay < min_delay_between_shifts:
+ continue
+ lit = model.new_bool_var("%i from %i to %i" % (d, s, o))
- # Increase no_break_driving or reset it to 0 depending on the delay
- if delay >= min_pause_after_4h:
- model.add(no_break_driving[d, o] == other[5]).only_enforce_if(lit)
- else:
- model.add(
- no_break_driving[d, o] == no_break_driving[d, s] + other[5]
- ).only_enforce_if(lit)
+ # Increase driving time
+ model.add(
+ total_driving[d, o] == total_driving[d, s] + other[5]
+ ).only_enforce_if(lit)
- # add arc
- outgoing_literals[s].append(lit)
- shared_outgoing_literals[s].append(lit)
- incoming_literals[o].append(lit)
- shared_incoming_literals[o].append(lit)
+ # Increase no_break_driving or reset it to 0 depending on the delay
+ if delay >= min_pause_after_4h:
+ model.add(no_break_driving[d, o] == other[5]).only_enforce_if(lit)
+ else:
+ model.add(
+ no_break_driving[d, o] == no_break_driving[d, s] + other[5]
+ ).only_enforce_if(lit)
- # Cost part
- delay_literals.append(lit)
- delay_weights.append(delay)
+ # add arc
+ outgoing_literals[s].append(lit)
+ shared_outgoing_literals[s].append(lit)
+ incoming_literals[o].append(lit)
+ shared_incoming_literals[o].append(lit)
- model.add(working_times[d] == end_times[d] - start_times[d])
+ # Cost part
+ delay_literals.append(lit)
+ delay_weights.append(delay)
- if minimize_drivers:
- # Driver is not working.
- working = model.new_bool_var("working_%i" % d)
- model.add(start_times[d] == min_start_time).only_enforce_if(~working)
- model.add(end_times[d] == min_start_time).only_enforce_if(~working)
- model.add(driving_times[d] == 0).only_enforce_if(~working)
- working_drivers.append(working)
- outgoing_source_literals.append(~working)
- incoming_sink_literals.append(~working)
- # Conditional working time constraints
- model.add(working_times[d] >= min_working_time).only_enforce_if(working)
- model.add(working_times[d] == 0).only_enforce_if(~working)
- else:
- # Working time constraints
- model.add(working_times[d] >= min_working_time)
+ model.add(working_times[d] == end_times[d] - start_times[d])
- # Create circuit constraint.
- model.add_exactly_one(outgoing_source_literals)
- for s in range(num_shifts):
- model.add_exactly_one(outgoing_literals[s])
- model.add_exactly_one(incoming_literals[s])
- model.add_exactly_one(incoming_sink_literals)
+ if minimize_drivers:
+ # Driver is not working.
+ working = model.new_bool_var("working_%i" % d)
+ model.add(start_times[d] == min_start_time).only_enforce_if(~working)
+ model.add(end_times[d] == min_start_time).only_enforce_if(~working)
+ model.add(driving_times[d] == 0).only_enforce_if(~working)
+ working_drivers.append(working)
+ outgoing_source_literals.append(~working)
+ incoming_sink_literals.append(~working)
+ # Conditional working time constraints
+ model.add(working_times[d] >= min_working_time).only_enforce_if(working)
+ model.add(working_times[d] == 0).only_enforce_if(~working)
+ else:
+ # Working time constraints
+ model.add(working_times[d] >= min_working_time)
- # Each shift is covered.
+ # Create circuit constraint.
+ model.add_exactly_one(outgoing_source_literals)
for s in range(num_shifts):
- model.add_exactly_one(performed[d, s] for d in range(num_drivers))
- # Globally, each node has one incoming and one outgoing literal
- model.add_exactly_one(shared_incoming_literals[s])
- model.add_exactly_one(shared_outgoing_literals[s])
+ model.add_exactly_one(outgoing_literals[s])
+ model.add_exactly_one(incoming_literals[s])
+ model.add_exactly_one(incoming_sink_literals)
- # Symmetry breaking
+ # Each shift is covered.
+ for s in range(num_shifts):
+ model.add_exactly_one(performed[d, s] for d in range(num_drivers))
+ # Globally, each node has one incoming and one outgoing literal
+ model.add_exactly_one(shared_incoming_literals[s])
+ model.add_exactly_one(shared_outgoing_literals[s])
- # The first 3 shifts must be performed by 3 different drivers.
- # Let's assign them to the first 3 drivers in sequence
- model.add(starting_shifts[0, 0] == 1)
- model.add(starting_shifts[1, 1] == 1)
- model.add(starting_shifts[2, 2] == 1)
+ # Symmetry breaking
- if minimize_drivers:
- # Push non working drivers to the end
- for d in range(num_drivers - 1):
- model.add_implication(~working_drivers[d], ~working_drivers[d + 1])
+ # The first 3 shifts must be performed by 3 different drivers.
+ # Let's assign them to the first 3 drivers in sequence
+ model.add(starting_shifts[0, 0] == 1)
+ model.add(starting_shifts[1, 1] == 1)
+ model.add(starting_shifts[2, 2] == 1)
- # Redundant constraints: sum of driving times = sum of shift driving times
- model.add(cp_model.LinearExpr.sum(driving_times) == total_driving_time)
- if not minimize_drivers:
- model.add(
- cp_model.LinearExpr.sum(working_times)
- == total_driving_time
- + num_drivers * (setup_time + cleanup_time)
- + cp_model.LinearExpr.weighted_sum(delay_literals, delay_weights)
- )
+ if minimize_drivers:
+ # Push non working drivers to the end
+ for d in range(num_drivers - 1):
+ model.add_implication(~working_drivers[d], ~working_drivers[d + 1])
- if minimize_drivers:
- # minimize the number of working drivers
- model.minimize(cp_model.LinearExpr.sum(working_drivers))
- else:
- # minimize the sum of delays between tasks, which in turns minimize the
- # sum of working times as the total driving time is fixed
- model.minimize(cp_model.LinearExpr.weighted_sum(delay_literals, delay_weights))
+ # Redundant constraints: sum of driving times = sum of shift driving times
+ model.add(cp_model.LinearExpr.sum(driving_times) == total_driving_time)
+ if not minimize_drivers:
+ model.add(
+ cp_model.LinearExpr.sum(working_times)
+ == total_driving_time
+ + num_drivers * (setup_time + cleanup_time)
+ + cp_model.LinearExpr.weighted_sum(delay_literals, delay_weights)
+ )
- if not minimize_drivers and _OUTPUT_PROTO.value:
- print("Writing proto to %s" % _OUTPUT_PROTO.value)
- with open(_OUTPUT_PROTO.value, "w") as text_file:
- text_file.write(str(model))
+ if minimize_drivers:
+ # minimize the number of working drivers
+ model.minimize(cp_model.LinearExpr.sum(working_drivers))
+ else:
+ # minimize the sum of delays between tasks, which in turns minimize the
+ # sum of working times as the total driving time is fixed
+ model.minimize(
+ cp_model.LinearExpr.weighted_sum(delay_literals, delay_weights)
+ )
- # Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
+ if not minimize_drivers and _OUTPUT_PROTO.value:
+ print("Writing proto to %s" % _OUTPUT_PROTO.value)
+ with open(_OUTPUT_PROTO.value, "w") as text_file:
+ text_file.write(str(model))
- status = solver.solve(model)
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
- if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
- return -1
+ status = solver.solve(model)
- # Display solution
- if minimize_drivers:
- max_num_drivers = int(solver.objective_value)
- print("minimal number of drivers =", max_num_drivers)
- return max_num_drivers
+ if status != cp_model.OPTIMAL and status != cp_model.FEASIBLE:
+ return -1
- for d in range(num_drivers):
- print("Driver %i: " % (d + 1))
- print(" total driving time =", solver.value(driving_times[d]))
- print(
- " working time =",
- solver.value(working_times[d]) + setup_time + cleanup_time,
- )
+ # Display solution
+ if minimize_drivers:
+ max_num_drivers = int(solver.objective_value)
+ print("minimal number of drivers =", max_num_drivers)
+ return max_num_drivers
+
+ for d in range(num_drivers):
+ print("Driver %i: " % (d + 1))
+ print(" total driving time =", solver.value(driving_times[d]))
+ print(
+ " working time =",
+ solver.value(working_times[d]) + setup_time + cleanup_time,
+ )
- first = True
- for s in range(num_shifts):
- shift = shifts[s]
+ first = True
+ for s in range(num_shifts):
+ shift = shifts[s]
- if not solver.boolean_value(performed[d, s]):
- continue
+ if not solver.boolean_value(performed[d, s]):
+ continue
- # Hack to detect if the waiting time between the last shift and
- # this one exceeds 30 minutes. For this, we look at the
- # no_break_driving which was reinitialized in that case.
- if solver.value(no_break_driving[d, s]) == shift[5] and not first:
- print(" **break**")
- print(" shift ", shift[0], ":", shift[1], "-", shift[2])
- first = False
+ # Hack to detect if the waiting time between the last shift and
+ # this one exceeds 30 minutes. For this, we look at the
+ # no_break_driving which was reinitialized in that case.
+ if solver.value(no_break_driving[d, s]) == shift[5] and not first:
+ print(" **break**")
+ print(" shift ", shift[0], ":", shift[1], "-", shift[2])
+ first = False
- return int(solver.objective_value)
+ return int(solver.objective_value)
def main(_):
- """Optimize the bus driver allocation in two passes."""
- print("----------- first pass: minimize the number of drivers")
- num_drivers = bus_driver_scheduling(True, -1)
- if num_drivers == -1:
- print("no solution found, skipping the final step")
- else:
- print("----------- second pass: minimize the sum of working times")
- bus_driver_scheduling(False, num_drivers)
+ """Optimize the bus driver allocation in two passes."""
+ print("----------- first pass: minimize the number of drivers")
+ num_drivers = bus_driver_scheduling(True, -1)
+ if num_drivers == -1:
+ print("no solution found, skipping the final step")
+ else:
+ print("----------- second pass: minimize the sum of working times")
+ bus_driver_scheduling(False, num_drivers)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
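The second-pass objective in bus_driver_scheduling_sat.py is a weighted sum of the selected transition arcs. A small sketch of LinearExpr.weighted_sum with toy weights; the cardinality constraint is only there to make the toy model non-trivial:

from ortools.sat.python import cp_model

model = cp_model.CpModel()
arcs = [model.new_bool_var(f"arc_{i}") for i in range(3)]
delays = [15, 40, 5]  # toy delays in minutes
model.add(sum(arcs) == 2)  # pretend exactly two arcs must be selected
model.minimize(cp_model.LinearExpr.weighted_sum(arcs, delays))

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print(solver.objective_value)  # 20.0: the two cheapest arcs are picked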
diff --git a/examples/python/chemical_balance_lp.py b/examples/python/chemical_balance_lp.py
index 82f5b6b6836..ca684cb1299 100755
--- a/examples/python/chemical_balance_lp.py
+++ b/examples/python/chemical_balance_lp.py
@@ -48,24 +48,31 @@
# Model
max_set = [
- min(max_quantities[q][1] / chemical_set[s][q + 1] for q in ALL_PRODUCTS
- if chemical_set[s][q + 1] != 0.0) for s in ALL_SETS
+ min(
+ max_quantities[q][1] / chemical_set[s][q + 1]
+ for q in ALL_PRODUCTS
+ if chemical_set[s][q + 1] != 0.0
+ )
+ for s in ALL_SETS
]
-solver = pywraplp.Solver("chemical_set_lp",
- pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
+solver = pywraplp.Solver(
+ "chemical_set_lp", pywraplp.Solver.GLOP_LINEAR_PROGRAMMING
+)
set_vars = [solver.NumVar(0, max_set[s], f"set_{s}") for s in ALL_SETS]
epsilon = solver.NumVar(0, 1000, "epsilon")
for p in ALL_PRODUCTS:
- solver.Add(
- sum(chemical_set[s][p + 1] * set_vars[s]
- for s in ALL_SETS) <= max_quantities[p][1])
- solver.Add(
- sum(chemical_set[s][p + 1] * set_vars[s]
- for s in ALL_SETS) >= max_quantities[p][1] - epsilon)
+ solver.Add(
+ sum(chemical_set[s][p + 1] * set_vars[s] for s in ALL_SETS)
+ <= max_quantities[p][1]
+ )
+ solver.Add(
+ sum(chemical_set[s][p + 1] * set_vars[s] for s in ALL_SETS)
+ >= max_quantities[p][1] - epsilon
+ )
solver.Minimize(epsilon)
@@ -85,11 +92,12 @@
print(f"Optimal objective value = {solver.Objective().Value()}")
for s in ALL_SETS:
- print(f" {chemical_set[s][0]} = {set_vars[s].solution_value()}", end=" ")
- print()
+ print(f" {chemical_set[s][0]} = {set_vars[s].solution_value()}", end=" ")
+ print()
for p in ALL_PRODUCTS:
- name = max_quantities[p][0]
- max_quantity = max_quantities[p][1]
- quantity = sum(set_vars[s].solution_value() * chemical_set[s][p + 1]
- for s in ALL_SETS)
- print(f"{name}: {quantity} out of {max_quantity}")
+ name = max_quantities[p][0]
+ max_quantity = max_quantities[p][1]
+ quantity = sum(
+ set_vars[s].solution_value() * chemical_set[s][p + 1] for s in ALL_SETS
+ )
+ print(f"{name}: {quantity} out of {max_quantity}")
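
The reformatted LP above keeps its original goal-programming trick: every product total must stay at or below its cap while coming within a shared slack epsilon of it, and epsilon is minimized. A minimal standalone sketch of that pattern with GLOP follows (illustrative variables and caps only, not the example's data):

from ortools.linear_solver import pywraplp

solver = pywraplp.Solver.CreateSolver("GLOP")

# Two decision variables and two illustrative caps.
x = solver.NumVar(0, 100, "x")
y = solver.NumVar(0, 100, "y")
totals = [2 * x + y, x + 3 * y]
caps = [80.0, 60.0]

# Each total must not exceed its cap, yet come within epsilon of it.
epsilon = solver.NumVar(0, 1000, "epsilon")
for total, cap in zip(totals, caps):
    solver.Add(total <= cap)
    solver.Add(total >= cap - epsilon)

solver.Minimize(epsilon)
if solver.Solve() == pywraplp.Solver.OPTIMAL:
    print("epsilon =", solver.Objective().Value())
    print("x =", x.solution_value(), "y =", y.solution_value())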
diff --git a/examples/python/chemical_balance_sat.py b/examples/python/chemical_balance_sat.py
index 9ea99e81e22..0fcbe682a69 100644
--- a/examples/python/chemical_balance_sat.py
+++ b/examples/python/chemical_balance_sat.py
@@ -26,94 +26,94 @@
def chemical_balance():
- """Solves the chemical balance problem."""
- # Data
- max_quantities = [
- ["N_Total", 1944],
- ["P2O5", 1166.4],
- ["K2O", 1822.5],
- ["CaO", 1458],
- ["MgO", 486],
- ["Fe", 9.7],
- ["B", 2.4],
- ]
-
- chemical_set = [
- ["A", 0, 0, 510, 540, 0, 0, 0],
- ["B", 110, 0, 0, 0, 160, 0, 0],
- ["C", 61, 149, 384, 0, 30, 1, 0.2],
- ["D", 148, 70, 245, 0, 15, 1, 0.2],
- ["E", 160, 158, 161, 0, 10, 1, 0.2],
- ]
-
- num_products = len(max_quantities)
- all_products = range(num_products)
-
- num_sets = len(chemical_set)
- all_sets = range(num_sets)
-
- # Model
-
- model = cp_model.CpModel()
-
- # Scale quantities by 100.
- max_set = [
- int(
- math.ceil(
- min(
- max_quantities[q][1] * 1000 / chemical_set[s][q + 1]
- for q in all_products
- if chemical_set[s][q + 1] != 0
- )
- )
- )
- for s in all_sets
- ]
-
- set_vars = [model.new_int_var(0, max_set[s], f"set_{s}") for s in all_sets]
-
- epsilon = model.new_int_var(0, 10000000, "epsilon")
-
+ """Solves the chemical balance problem."""
+ # Data
+ max_quantities = [
+ ["N_Total", 1944],
+ ["P2O5", 1166.4],
+ ["K2O", 1822.5],
+ ["CaO", 1458],
+ ["MgO", 486],
+ ["Fe", 9.7],
+ ["B", 2.4],
+ ]
+
+ chemical_set = [
+ ["A", 0, 0, 510, 540, 0, 0, 0],
+ ["B", 110, 0, 0, 0, 160, 0, 0],
+ ["C", 61, 149, 384, 0, 30, 1, 0.2],
+ ["D", 148, 70, 245, 0, 15, 1, 0.2],
+ ["E", 160, 158, 161, 0, 10, 1, 0.2],
+ ]
+
+ num_products = len(max_quantities)
+ all_products = range(num_products)
+
+ num_sets = len(chemical_set)
+ all_sets = range(num_sets)
+
+ # Model
+
+ model = cp_model.CpModel()
+
+ # Scale quantities by 100.
+ max_set = [
+ int(
+ math.ceil(
+ min(
+ max_quantities[q][1] * 1000 / chemical_set[s][q + 1]
+ for q in all_products
+ if chemical_set[s][q + 1] != 0
+ )
+ )
+ )
+ for s in all_sets
+ ]
+
+ set_vars = [model.new_int_var(0, max_set[s], f"set_{s}") for s in all_sets]
+
+ epsilon = model.new_int_var(0, 10000000, "epsilon")
+
+ for p in all_products:
+ model.add(
+ sum(int(chemical_set[s][p + 1] * 10) * set_vars[s] for s in all_sets)
+ <= int(max_quantities[p][1] * 10000)
+ )
+ model.add(
+ sum(int(chemical_set[s][p + 1] * 10) * set_vars[s] for s in all_sets)
+ >= int(max_quantities[p][1] * 10000) - epsilon
+ )
+
+ model.minimize(epsilon)
+
+ # Creates a solver and solves.
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+ if status == cp_model.OPTIMAL:
+ # The objective value of the solution.
+ print(f"Optimal objective value = {solver.objective_value / 10000.0}")
+
+ for s in all_sets:
+ print(
+ f" {chemical_set[s][0]} = {solver.value(set_vars[s]) / 1000.0}",
+ end=" ",
+ )
+ print()
for p in all_products:
- model.add(
- sum(int(chemical_set[s][p + 1] * 10) * set_vars[s] for s in all_sets)
- <= int(max_quantities[p][1] * 10000)
- )
- model.add(
- sum(int(chemical_set[s][p + 1] * 10) * set_vars[s] for s in all_sets)
- >= int(max_quantities[p][1] * 10000) - epsilon
- )
-
- model.minimize(epsilon)
-
- # Creates a solver and solves.
- solver = cp_model.CpSolver()
- status = solver.solve(model)
- if status == cp_model.OPTIMAL:
- # The objective value of the solution.
- print(f"Optimal objective value = {solver.objective_value / 10000.0}")
-
- for s in all_sets:
- print(
- f" {chemical_set[s][0]} = {solver.value(set_vars[s]) / 1000.0}",
- end=" ",
- )
- print()
- for p in all_products:
- name = max_quantities[p][0]
- max_quantity = max_quantities[p][1]
- quantity = sum(
- solver.value(set_vars[s]) / 1000.0 * chemical_set[s][p + 1]
- for s in all_sets
- )
- print(f"{name}: {quantity:.3f} out of {max_quantity}")
+ name = max_quantities[p][0]
+ max_quantity = max_quantities[p][1]
+ quantity = sum(
+ solver.value(set_vars[s]) / 1000.0 * chemical_set[s][p + 1]
+ for s in all_sets
+ )
+ print(f"{name}: {quantity:.3f} out of {max_quantity}")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- chemical_balance()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ chemical_balance()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
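
The CP-SAT variant above has to work in integers, so the fractional coefficients and bounds are multiplied by fixed scale factors before being handed to the solver and divided back out when printing. A minimal sketch of that scaling idiom, with made-up numbers:

from ortools.sat.python import cp_model

model = cp_model.CpModel()

# Fractional data: a coefficient and an upper bound for one constraint.
coeff = 0.2
bound = 9.7

# Multiply both sides by the same scale factor so CP-SAT only sees integers.
SCALE = 10
x = model.new_int_var(0, 1000, "x")
model.add(int(coeff * SCALE) * x <= int(bound * SCALE))  # 2 * x <= 97
model.maximize(x)

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print("x =", solver.value(x))  # x itself was never scaled

In the example both the coefficients and the set variables are scaled, which is why the printed results divide the objective by 10000.0 and the set values by 1000.0.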
diff --git a/examples/python/clustering_sat.py b/examples/python/clustering_sat.py
index b53eddb87bc..76ebcf5ad62 100644
--- a/examples/python/clustering_sat.py
+++ b/examples/python/clustering_sat.py
@@ -66,78 +66,78 @@
def clustering_sat() -> None:
- """Entry point of the program."""
- num_nodes = len(distance_matrix)
- print("Num nodes =", num_nodes)
-
- # Number of groups to split the nodes, must divide num_nodes.
- num_groups = 4
- group_size = num_nodes // num_groups
-
- # Model.
- model = cp_model.CpModel()
-
- # Variables.
- neighbors = {}
- obj_vars = []
- obj_coeffs = []
- for n1 in range(num_nodes - 1):
- for n2 in range(n1 + 1, num_nodes):
- same = model.new_bool_var("neighbors_%i_%i" % (n1, n2))
- neighbors[n1, n2] = same
- obj_vars.append(same)
- obj_coeffs.append(distance_matrix[n1][n2] + distance_matrix[n2][n1])
-
- # Number of neighborss:
- for n in range(num_nodes):
+ """Entry point of the program."""
+ num_nodes = len(distance_matrix)
+ print("Num nodes =", num_nodes)
+
+ # Number of groups to split the nodes, must divide num_nodes.
+ num_groups = 4
+ group_size = num_nodes // num_groups
+
+ # Model.
+ model = cp_model.CpModel()
+
+ # Variables.
+ neighbors = {}
+ obj_vars = []
+ obj_coeffs = []
+ for n1 in range(num_nodes - 1):
+ for n2 in range(n1 + 1, num_nodes):
+ same = model.new_bool_var("neighbors_%i_%i" % (n1, n2))
+ neighbors[n1, n2] = same
+ obj_vars.append(same)
+ obj_coeffs.append(distance_matrix[n1][n2] + distance_matrix[n2][n1])
+
+    # Number of neighbors:
+ for n in range(num_nodes):
+ model.add(
+ sum(neighbors[m, n] for m in range(n))
+ + sum(neighbors[n, m] for m in range(n + 1, num_nodes))
+ == group_size - 1
+ )
+
+    # Enforce transitivity on all triplets.
+ for n1 in range(num_nodes - 2):
+ for n2 in range(n1 + 1, num_nodes - 1):
+ for n3 in range(n2 + 1, num_nodes):
model.add(
- sum(neighbors[m, n] for m in range(n))
- + sum(neighbors[n, m] for m in range(n + 1, num_nodes))
- == group_size - 1
+ neighbors[n1, n3] + neighbors[n2, n3] + neighbors[n1, n2] != 2
)
- # Enforce transivity on all triplets.
- for n1 in range(num_nodes - 2):
- for n2 in range(n1 + 1, num_nodes - 1):
- for n3 in range(n2 + 1, num_nodes):
- model.add(
- neighbors[n1, n3] + neighbors[n2, n3] + neighbors[n1, n2] != 2
- )
-
- # Redundant constraints on total sum of neighborss.
- model.add(sum(obj_vars) == num_groups * group_size * (group_size - 1) // 2)
-
- # Minimize weighted sum of arcs.
- model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
-
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- solver.parameters.log_search_progress = True
- solver.parameters.num_search_workers = 8
-
- status = solver.solve(model)
- print(solver.response_stats())
-
- if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
- visited = set()
- for g in range(num_groups):
- for n in range(num_nodes):
- if n not in visited:
- visited.add(n)
- output = str(n)
- for o in range(n + 1, num_nodes):
- if solver.boolean_value(neighbors[n, o]):
- visited.add(o)
- output += " " + str(o)
- print("Group", g, ":", output)
- break
+    # Redundant constraints on the total sum of neighbors.
+ model.add(sum(obj_vars) == num_groups * group_size * (group_size - 1) // 2)
+
+ # Minimize weighted sum of arcs.
+ model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
+
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ solver.parameters.log_search_progress = True
+ solver.parameters.num_search_workers = 8
+
+ status = solver.solve(model)
+ print(solver.response_stats())
+
+ if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
+ visited = set()
+ for g in range(num_groups):
+ for n in range(num_nodes):
+ if n not in visited:
+ visited.add(n)
+ output = str(n)
+ for o in range(n + 1, num_nodes):
+ if solver.boolean_value(neighbors[n, o]):
+ visited.add(o)
+ output += " " + str(o)
+ print("Group", g, ":", output)
+ break
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- clustering_sat()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ clustering_sat()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
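
The central modelling device in clustering_sat.py is the triplet cut "neighbors[n1, n3] + neighbors[n2, n3] + neighbors[n1, n2] != 2" on the Boolean same-group literals: it forbids exactly two of the three pair relations holding, which makes the grouping relation transitive. A minimal sketch of just that device on three nodes (the extra exclusion and the weights are invented for illustration):

from ortools.sat.python import cp_model

model = cp_model.CpModel()

# One Boolean per unordered pair: true iff the two nodes share a group.
same01 = model.new_bool_var("same01")
same02 = model.new_bool_var("same02")
same12 = model.new_bool_var("same12")

# Forbid exactly two of the three relations holding: if 0 is with 1 and
# 1 is with 2, then 0 must also be with 2.
model.add(same01 + same02 + same12 != 2)

# Invented side constraint: nodes 0 and 2 must not share a group.
model.add(same02 == 0)

# Invented rewards for grouping pairs; with same02 fixed to 0 the triplet
# cut allows at most one of the remaining two relations to hold.
model.maximize(3 * same01 + 2 * same12)

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print("same01 =", solver.boolean_value(same01))
    print("same02 =", solver.boolean_value(same02))
    print("same12 =", solver.boolean_value(same12))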
diff --git a/examples/python/cover_rectangle_sat.py b/examples/python/cover_rectangle_sat.py
index 81089f682af..32ebbc6cd68 100644
--- a/examples/python/cover_rectangle_sat.py
+++ b/examples/python/cover_rectangle_sat.py
@@ -20,104 +20,104 @@
def cover_rectangle(num_squares: int) -> bool:
- """Try to fill the rectangle with a given number of squares."""
- size_x = 60
- size_y = 50
-
- model = cp_model.CpModel()
-
- areas = []
- sizes = []
- x_intervals = []
- y_intervals = []
- x_starts = []
- y_starts = []
-
- # Creates intervals for the NoOverlap2D and size variables.
+ """Try to fill the rectangle with a given number of squares."""
+ size_x = 60
+ size_y = 50
+
+ model = cp_model.CpModel()
+
+ areas = []
+ sizes = []
+ x_intervals = []
+ y_intervals = []
+ x_starts = []
+ y_starts = []
+
+ # Creates intervals for the NoOverlap2D and size variables.
+ for i in range(num_squares):
+ size = model.new_int_var(1, size_y, "size_%i" % i)
+ start_x = model.new_int_var(0, size_x, "sx_%i" % i)
+ end_x = model.new_int_var(0, size_x, "ex_%i" % i)
+ start_y = model.new_int_var(0, size_y, "sy_%i" % i)
+ end_y = model.new_int_var(0, size_y, "ey_%i" % i)
+
+ interval_x = model.new_interval_var(start_x, size, end_x, "ix_%i" % i)
+ interval_y = model.new_interval_var(start_y, size, end_y, "iy_%i" % i)
+
+ area = model.new_int_var(1, size_y * size_y, "area_%i" % i)
+ model.add_multiplication_equality(area, [size, size])
+
+ areas.append(area)
+ x_intervals.append(interval_x)
+ y_intervals.append(interval_y)
+ sizes.append(size)
+ x_starts.append(start_x)
+ y_starts.append(start_y)
+
+ # Main constraint.
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+
+ # Redundant constraints.
+ model.add_cumulative(x_intervals, sizes, size_y)
+ model.add_cumulative(y_intervals, sizes, size_x)
+
+ # Forces the rectangle to be exactly covered.
+ model.add(sum(areas) == size_x * size_y)
+
+ # Symmetry breaking 1: sizes are ordered.
+ for i in range(num_squares - 1):
+ model.add(sizes[i] <= sizes[i + 1])
+
+ # Define same to be true iff sizes[i] == sizes[i + 1]
+ same = model.new_bool_var("")
+ model.add(sizes[i] == sizes[i + 1]).only_enforce_if(same)
+ model.add(sizes[i] < sizes[i + 1]).only_enforce_if(~same)
+
+ # Tie break with starts.
+ model.add(x_starts[i] <= x_starts[i + 1]).only_enforce_if(same)
+
+ # Symmetry breaking 2: first square in one quadrant.
+ model.add(x_starts[0] < (size_x + 1) // 2)
+ model.add(y_starts[0] < (size_y + 1) // 2)
+
+ # Creates a solver and solves.
+ solver = cp_model.CpSolver()
+ solver.parameters.num_workers = 8
+ solver.parameters.max_time_in_seconds = 10.0
+ status = solver.solve(model)
+ print("%s found in %0.2fs" % (solver.status_name(status), solver.wall_time))
+
+ # Prints solution.
+ solution_found = status == cp_model.OPTIMAL or status == cp_model.FEASIBLE
+ if solution_found:
+ display = [[" " for _ in range(size_x)] for _ in range(size_y)]
for i in range(num_squares):
- size = model.new_int_var(1, size_y, "size_%i" % i)
- start_x = model.new_int_var(0, size_x, "sx_%i" % i)
- end_x = model.new_int_var(0, size_x, "ex_%i" % i)
- start_y = model.new_int_var(0, size_y, "sy_%i" % i)
- end_y = model.new_int_var(0, size_y, "ey_%i" % i)
-
- interval_x = model.new_interval_var(start_x, size, end_x, "ix_%i" % i)
- interval_y = model.new_interval_var(start_y, size, end_y, "iy_%i" % i)
-
- area = model.new_int_var(1, size_y * size_y, "area_%i" % i)
- model.add_multiplication_equality(area, [size, size])
-
- areas.append(area)
- x_intervals.append(interval_x)
- y_intervals.append(interval_y)
- sizes.append(size)
- x_starts.append(start_x)
- y_starts.append(start_y)
-
- # Main constraint.
- model.add_no_overlap_2d(x_intervals, y_intervals)
-
- # Redundant constraints.
- model.add_cumulative(x_intervals, sizes, size_y)
- model.add_cumulative(y_intervals, sizes, size_x)
-
- # Forces the rectangle to be exactly covered.
- model.add(sum(areas) == size_x * size_y)
-
- # Symmetry breaking 1: sizes are ordered.
- for i in range(num_squares - 1):
- model.add(sizes[i] <= sizes[i + 1])
-
- # Define same to be true iff sizes[i] == sizes[i + 1]
- same = model.new_bool_var("")
- model.add(sizes[i] == sizes[i + 1]).only_enforce_if(same)
- model.add(sizes[i] < sizes[i + 1]).only_enforce_if(~same)
-
- # Tie break with starts.
- model.add(x_starts[i] <= x_starts[i + 1]).only_enforce_if(same)
-
- # Symmetry breaking 2: first square in one quadrant.
- model.add(x_starts[0] < (size_x + 1) // 2)
- model.add(y_starts[0] < (size_y + 1) // 2)
-
- # Creates a solver and solves.
- solver = cp_model.CpSolver()
- solver.parameters.num_workers = 8
- solver.parameters.max_time_in_seconds = 10.0
- status = solver.solve(model)
- print("%s found in %0.2fs" % (solver.status_name(status), solver.wall_time))
-
- # Prints solution.
- solution_found = status == cp_model.OPTIMAL or status == cp_model.FEASIBLE
- if solution_found:
- display = [[" " for _ in range(size_x)] for _ in range(size_y)]
- for i in range(num_squares):
- sol_x = solver.value(x_starts[i])
- sol_y = solver.value(y_starts[i])
- sol_s = solver.value(sizes[i])
- char = format(i, "01x")
- for j in range(sol_s):
- for k in range(sol_s):
- if display[sol_y + j][sol_x + k] != " ":
- print(
- "ERROR between %s and %s"
- % (display[sol_y + j][sol_x + k], char)
- )
- display[sol_y + j][sol_x + k] = char
-
- for line in range(size_y):
- print(" ".join(display[line]))
- return solution_found
+ sol_x = solver.value(x_starts[i])
+ sol_y = solver.value(y_starts[i])
+ sol_s = solver.value(sizes[i])
+ char = format(i, "01x")
+ for j in range(sol_s):
+ for k in range(sol_s):
+ if display[sol_y + j][sol_x + k] != " ":
+ print(
+ "ERROR between %s and %s"
+ % (display[sol_y + j][sol_x + k], char)
+ )
+ display[sol_y + j][sol_x + k] = char
+
+ for line in range(size_y):
+ print(" ".join(display[line]))
+ return solution_found
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- for num_squares in range(1, 15):
- print("Trying with size =", num_squares)
- if cover_rectangle(num_squares):
- break
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ for num_squares in range(1, 15):
+ print("Trying with size =", num_squares)
+ if cover_rectangle(num_squares):
+ break
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
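
cover_rectangle_sat.py pairs one x-interval and one y-interval per square and hands both lists to add_no_overlap_2d, with cumulative and total-area constraints as redundant strengthening. A stripped-down sketch of that 2-D packing skeleton, placing two fixed-size squares in a small box (sizes and box dimensions invented):

from ortools.sat.python import cp_model

model = cp_model.CpModel()
box_x, box_y = 6, 4
sizes = [3, 2]  # two squares with fixed, invented edge lengths

x_intervals, y_intervals, x_starts, y_starts = [], [], [], []
for i, s in enumerate(sizes):
    sx = model.new_int_var(0, box_x - s, f"sx_{i}")
    ex = model.new_int_var(0, box_x, f"ex_{i}")
    sy = model.new_int_var(0, box_y - s, f"sy_{i}")
    ey = model.new_int_var(0, box_y, f"ey_{i}")
    # One interval per axis; end = start + size is enforced by the interval.
    x_intervals.append(model.new_interval_var(sx, s, ex, f"ix_{i}"))
    y_intervals.append(model.new_interval_var(sy, s, ey, f"iy_{i}"))
    x_starts.append(sx)
    y_starts.append(sy)

# The squares must not overlap anywhere in the plane.
model.add_no_overlap_2d(x_intervals, y_intervals)

solver = cp_model.CpSolver()
if solver.solve(model) in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    for i in range(len(sizes)):
        print(f"square {i}: x={solver.value(x_starts[i])}, y={solver.value(y_starts[i])}")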
diff --git a/examples/python/cryptarithm_sat.py b/examples/python/cryptarithm_sat.py
index c4e49e0873d..0732c8df1f7 100644
--- a/examples/python/cryptarithm_sat.py
+++ b/examples/python/cryptarithm_sat.py
@@ -12,71 +12,70 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Use CP-SAT to solve a simple cryptarithmetic problem: SEND+MORE=MONEY.
-"""
+"""Use CP-SAT to solve a simple cryptarithmetic problem: SEND+MORE=MONEY."""
from absl import app
from ortools.sat.python import cp_model
def send_more_money() -> None:
- """solve the cryptarithmic puzzle SEND+MORE=MONEY."""
- model = cp_model.CpModel()
-
- # Create variables.
- # Since s is a leading digit, it can't be 0.
- s = model.new_int_var(1, 9, "s")
- e = model.new_int_var(0, 9, "e")
- n = model.new_int_var(0, 9, "n")
- d = model.new_int_var(0, 9, "d")
- # Since m is a leading digit, it can't be 0.
- m = model.new_int_var(1, 9, "m")
- o = model.new_int_var(0, 9, "o")
- r = model.new_int_var(0, 9, "r")
- y = model.new_int_var(0, 9, "y")
-
- # Create carry variables. c0 is true if the first column of addends carries
- # a 1, c2 is true if the second column carries a 1, and so on.
- c0 = model.new_bool_var("c0")
- c1 = model.new_bool_var("c1")
- c2 = model.new_bool_var("c2")
- c3 = model.new_bool_var("c3")
-
- # Force all letters to take on different values.
- model.add_all_different(s, e, n, d, m, o, r, y)
-
- # Column 0:
- model.add(c0 == m)
-
- # Column 1:
- model.add(c1 + s + m == o + 10 * c0)
-
- # Column 2:
- model.add(c2 + e + o == n + 10 * c1)
-
- # Column 3:
- model.add(c3 + n + r == e + 10 * c2)
-
- # Column 4:
- model.add(d + e == y + 10 * c3)
-
- # solve model.
- solver = cp_model.CpSolver()
- if solver.solve(model) == cp_model.OPTIMAL:
- print("Optimal solution found!")
- print("s:", solver.value(s))
- print("e:", solver.value(e))
- print("n:", solver.value(n))
- print("d:", solver.value(d))
- print("m:", solver.value(m))
- print("o:", solver.value(o))
- print("r:", solver.value(r))
- print("y:", solver.value(y))
+ """solve the cryptarithmic puzzle SEND+MORE=MONEY."""
+ model = cp_model.CpModel()
+
+ # Create variables.
+ # Since s is a leading digit, it can't be 0.
+ s = model.new_int_var(1, 9, "s")
+ e = model.new_int_var(0, 9, "e")
+ n = model.new_int_var(0, 9, "n")
+ d = model.new_int_var(0, 9, "d")
+ # Since m is a leading digit, it can't be 0.
+ m = model.new_int_var(1, 9, "m")
+ o = model.new_int_var(0, 9, "o")
+ r = model.new_int_var(0, 9, "r")
+ y = model.new_int_var(0, 9, "y")
+
+ # Create carry variables. c0 is true if the first column of addends carries
+    # a 1, c1 is true if the second column carries a 1, and so on.
+ c0 = model.new_bool_var("c0")
+ c1 = model.new_bool_var("c1")
+ c2 = model.new_bool_var("c2")
+ c3 = model.new_bool_var("c3")
+
+ # Force all letters to take on different values.
+ model.add_all_different(s, e, n, d, m, o, r, y)
+
+ # Column 0:
+ model.add(c0 == m)
+
+ # Column 1:
+ model.add(c1 + s + m == o + 10 * c0)
+
+ # Column 2:
+ model.add(c2 + e + o == n + 10 * c1)
+
+ # Column 3:
+ model.add(c3 + n + r == e + 10 * c2)
+
+ # Column 4:
+ model.add(d + e == y + 10 * c3)
+
+ # solve model.
+ solver = cp_model.CpSolver()
+ if solver.solve(model) == cp_model.OPTIMAL:
+ print("Optimal solution found!")
+ print("s:", solver.value(s))
+ print("e:", solver.value(e))
+ print("n:", solver.value(n))
+ print("d:", solver.value(d))
+ print("m:", solver.value(m))
+ print("o:", solver.value(o))
+ print("r:", solver.value(r))
+ print("y:", solver.value(y))
def main(_) -> None:
- send_more_money()
+ send_more_money()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
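
For comparison with the column-and-carry model above, SEND+MORE=MONEY can also be written as a single linear equation over the letter values; the sketch below uses that alternative formulation (it is not the approach taken by the patch):

from ortools.sat.python import cp_model

model = cp_model.CpModel()
s = model.new_int_var(1, 9, "s")  # leading digit, cannot be 0
e = model.new_int_var(0, 9, "e")
n = model.new_int_var(0, 9, "n")
d = model.new_int_var(0, 9, "d")
m = model.new_int_var(1, 9, "m")  # leading digit, cannot be 0
o = model.new_int_var(0, 9, "o")
r = model.new_int_var(0, 9, "r")
y = model.new_int_var(0, 9, "y")

model.add_all_different(s, e, n, d, m, o, r, y)

# SEND + MORE == MONEY, written directly on the letter values.
send = 1000 * s + 100 * e + 10 * n + d
more = 1000 * m + 100 * o + 10 * r + e
money = 10000 * m + 1000 * o + 100 * n + 10 * e + y
model.add(send + more == money)

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    for name, var in zip("sendmory", (s, e, n, d, m, o, r, y)):
        print(name, "=", solver.value(var))

Both formulations find the unique assignment S=9, E=5, N=6, D=7, M=1, O=0, R=8, Y=2 (9567 + 1085 = 10652).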
diff --git a/examples/python/cvrptw_plot.py b/examples/python/cvrptw_plot.py
index 074cd5ea346..7f199428f93 100644
--- a/examples/python/cvrptw_plot.py
+++ b/examples/python/cvrptw_plot.py
@@ -13,25 +13,25 @@
# limitations under the License.
"""Capacitated Vehicle Routing Problem with Time Windows (and optional orders).
- This is a sample using the routing library python wrapper to solve a
- CVRPTW problem.
- A description of the problem can be found here:
- http://en.wikipedia.org/wiki/Vehicle_routing_problem.
- The variant which is tackled by this model includes a capacity dimension,
- time windows and optional orders, with a penalty cost if orders are not
- performed.
- To help explore the problem, two classes are provided Customers() and
- Vehicles(): used to randomly locate orders and depots, and to randomly
- generate demands, time-window constraints and vehicles.
- Distances are computed using the Great Circle distances. Distances are in km
- and times in seconds.
-
- A function for the displaying of the vehicle plan
- display_vehicle_output
-
- The optimization engine uses local search to improve solutions, first
- solutions being generated using a cheapest addition heuristic.
- Numpy and Matplotlib are required for the problem creation and display.
+This is a sample using the routing library python wrapper to solve a
+CVRPTW problem.
+A description of the problem can be found here:
+http://en.wikipedia.org/wiki/Vehicle_routing_problem.
+The variant which is tackled by this model includes a capacity dimension,
+time windows and optional orders, with a penalty cost if orders are not
+performed.
+To help explore the problem, two classes are provided, Customers() and
+Vehicles(): used to randomly locate orders and depots, and to randomly
+generate demands, time-window constraints and vehicles.
+Distances are computed using the Great Circle distances. Distances are in km
+and times in seconds.
+
+A function for displaying the vehicle plan:
+display_vehicle_output
+
+The optimization engine uses local search to improve solutions, first
+solutions being generated using a cheapest addition heuristic.
+Numpy and Matplotlib are required for the problem creation and display.
"""
import os
@@ -39,715 +39,748 @@
from matplotlib import pyplot as plt
from collections import namedtuple
from ortools.constraint_solver import pywrapcp
-from ortools.constraint_solver import routing_enums_pb2
+from ortools.routing import enums_pb2
from datetime import datetime, timedelta
-class Customers():
- """
- A class that generates and holds customers information.
-
- Randomly normally distribute a number of customers and locations within
- a region described by a rectangle. Generate a random demand for each
- customer. Generate a random time window for each customer.
- May either be initiated with the extents, as a dictionary describing
- two corners of a rectangle in latitude and longitude OR as a center
- point (lat, lon), and box_size in km. The default arguments are for a
- 10 x 10 km square centered in Sheffield).
-
- Args: extents (Optional[Dict]): A dictionary describing a rectangle in
- latitude and longitude with the keys 'llcrnrlat', 'llcrnrlon' &
- 'urcrnrlat' & 'urcrnrlat' center (Optional(Tuple): A tuple of
- (latitude, longitude) describing the centre of the rectangle. box_size
- (Optional float: The length in km of the box's sides. num_stops (int):
- The number of customers, including the depots that are placed normally
- distributed in the rectangle. min_demand (int): Lower limit on the
- randomly generated demand at each customer. max_demand (int): Upper
- limit on the randomly generated demand at each customer.
- min_tw: shortest random time window for a customer, in hours.
- max_tw: longest random time window for a customer, in hours.
- Examples: To place 100 customers randomly within 100 km x 100 km
- rectangle, centered in the default location, with a random demand of
- between 5 and 10 units: >>> customers = Customers(num_stops=100,
- box_size=100, ... min_demand=5, max_demand=10)
- alternatively, to place 75 customers in the same area with default
- arguments for demand: >>> extents = {'urcrnrlon': 0.03403, 'llcrnrlon':
- -2.98325, ... 'urcrnrlat': 54.28127, 'llcrnrlat': 52.48150} >>>
- customers = Customers(num_stops=75, extents=extents)
+class Customers:
+ """
+ A class that generates and holds customers information.
+
+ Randomly normally distribute a number of customers and locations within
+ a region described by a rectangle. Generate a random demand for each
+ customer. Generate a random time window for each customer.
+ May either be initiated with the extents, as a dictionary describing
+ two corners of a rectangle in latitude and longitude OR as a center
+ point (lat, lon), and box_size in km. The default arguments are for a
+  10 x 10 km square centered in Sheffield.
+
+ Args: extents (Optional[Dict]): A dictionary describing a rectangle in
+ latitude and longitude with the keys 'llcrnrlat', 'llcrnrlon' &
+  'urcrnrlat' & 'urcrnrlon'. center (Optional[Tuple]): A tuple of
+ (latitude, longitude) describing the centre of the rectangle. box_size
+  (Optional[float]): The length in km of the box's sides. num_stops (int):
+ The number of customers, including the depots that are placed normally
+ distributed in the rectangle. min_demand (int): Lower limit on the
+ randomly generated demand at each customer. max_demand (int): Upper
+ limit on the randomly generated demand at each customer.
+ min_tw: shortest random time window for a customer, in hours.
+ max_tw: longest random time window for a customer, in hours.
+ Examples: To place 100 customers randomly within 100 km x 100 km
+ rectangle, centered in the default location, with a random demand of
+ between 5 and 10 units: >>> customers = Customers(num_stops=100,
+ box_size=100, ... min_demand=5, max_demand=10)
+ alternatively, to place 75 customers in the same area with default
+ arguments for demand: >>> extents = {'urcrnrlon': 0.03403, 'llcrnrlon':
+ -2.98325, ... 'urcrnrlat': 54.28127, 'llcrnrlat': 52.48150} >>>
+ customers = Customers(num_stops=75, extents=extents)
"""
- def __init__(self,
- extents=None,
- center=(53.381393, -1.474611),
- box_size=10,
- num_stops=100,
- min_demand=0,
- max_demand=25,
- min_tw=1,
- max_tw=5):
- self.number = num_stops #: The number of customers and depots
- #: Location, a named tuple for locations.
- Location = namedtuple('Location', ['lat', 'lon'])
- if extents is not None:
- self.extents = extents #: The lower left and upper right points
- #: Location[lat,lon]: the centre point of the area.
- self.center = Location(
- extents['urcrnrlat'] - 0.5 *
- (extents['urcrnrlat'] - extents['llcrnrlat']),
- extents['urcrnrlon'] - 0.5 *
- (extents['urcrnrlon'] - extents['llcrnrlon']))
- else:
- #: Location[lat,lon]: the centre point of the area.
- (clat, clon) = self.center = Location(center[0], center[1])
- rad_earth = 6367 # km
- circ_earth = np.pi * rad_earth
- #: The lower left and upper right points
- self.extents = {
- 'llcrnrlon': (clon - 180 * box_size /
- (circ_earth * np.cos(np.deg2rad(clat)))),
- 'llcrnrlat':
- clat - 180 * box_size / circ_earth,
- 'urcrnrlon': (clon + 180 * box_size /
- (circ_earth * np.cos(np.deg2rad(clat)))),
- 'urcrnrlat':
- clat + 180 * box_size / circ_earth
- }
- # The 'name' of the stop, indexed from 0 to num_stops-1
- stops = np.array(range(0, num_stops))
- # normaly distributed random distribution of stops within the box
- stdv = 6 # the number of standard deviations 99.9% will be within +-3
- lats = (self.extents['llcrnrlat'] + np.random.randn(num_stops) *
- (self.extents['urcrnrlat'] - self.extents['llcrnrlat']) / stdv)
- lons = (self.extents['llcrnrlon'] + np.random.randn(num_stops) *
- (self.extents['urcrnrlon'] - self.extents['llcrnrlon']) / stdv)
- # uniformly distributed integer demands.
- demands = np.random.randint(min_demand, max_demand, num_stops)
-
- self.time_horizon = 24 * 60**2 # A 24 hour period.
-
- # The customers demand min_tw to max_tw hour time window for each
- # delivery
- time_windows = np.random.randint(min_tw * 3600, max_tw * 3600,
- num_stops)
- # The last time a delivery window can start
- latest_time = self.time_horizon - time_windows
- start_times = [None for o in time_windows]
- stop_times = [None for o in time_windows]
- # Make random timedeltas, nominally from the start of the day.
- for idx in range(self.number):
- stime = int(np.random.randint(0, latest_time[idx]))
- start_times[idx] = timedelta(seconds=stime)
- stop_times[idx] = (
- start_times[idx] + timedelta(seconds=int(time_windows[idx])))
- # A named tuple for the customer
- Customer = namedtuple(
- 'Customer',
- [
- 'index', # the index of the stop
- 'demand', # the demand for the stop
- 'lat', # the latitude of the stop
- 'lon', # the longitude of the stop
- 'tw_open', # timedelta window open
- 'tw_close'
- ]) # timedelta window cls
-
- self.customers = [
- Customer(idx, dem, lat, lon, tw_open, tw_close)
- for idx, dem, lat, lon, tw_open, tw_close in zip(
- stops, demands, lats, lons, start_times, stop_times)
- ]
-
- # The number of seconds needed to 'unload' 1 unit of goods.
- self.service_time_per_dem = 300 # seconds
-
- def set_manager(self, manager):
- self.manager = manager
-
- def central_start_node(self, invert=False):
- """
- Return a random starting node, with probability weighted by distance
- from the centre of the extents, so that a central starting node is
- likely.
-
- Args: invert (Optional bool): When True, a peripheral starting node is
- most likely.
-
- Returns:
- int: a node index.
-
- Examples:
- >>> customers.central_start_node(invert=True)
- 42
- """
- num_nodes = len(self.customers)
- dist = np.empty((num_nodes, 1))
- for idx_to in range(num_nodes):
- dist[idx_to] = self._haversine(self.center.lon, self.center.lat,
- self.customers[idx_to].lon,
- self.customers[idx_to].lat)
- furthest = np.max(dist)
-
- if invert:
- prob = dist * 1.0 / sum(dist)
- else:
- prob = (furthest - dist * 1.0) / sum(furthest - dist)
- indexes = np.array([range(num_nodes)])
- start_node = np.random.choice(
- indexes.flatten(), size=1, replace=True, p=prob.flatten())
- return start_node[0]
-
- def make_distance_mat(self, method='haversine'):
- """
- Return a distance matrix and make it a member of Customer, using the
- method given in the call. Currently only Haversine (GC distance) is
- implemented, but Manhattan, or using a maps API could be added here.
- Raises an AssertionError for all other methods.
-
- Args: method (Optional[str]): method of distance calculation to use. The
- Haversine formula is the only method implemented.
-
- Returns:
- Numpy array of node to node distances.
-
- Examples:
- >>> dist_mat = customers.make_distance_mat(method='haversine')
- >>> dist_mat = customers.make_distance_mat(method='manhattan')
- AssertionError
- """
- self.distmat = np.zeros((self.number, self.number))
- methods = {'haversine': self._haversine}
- assert (method in methods)
- for frm_idx in range(self.number):
- for to_idx in range(self.number):
- if frm_idx != to_idx:
- frm_c = self.customers[frm_idx]
- to_c = self.customers[to_idx]
- self.distmat[frm_idx, to_idx] = self._haversine(
- frm_c.lon, frm_c.lat, to_c.lon, to_c.lat)
- return (self.distmat)
-
- def _haversine(self, lon1, lat1, lon2, lat2):
- """
- Calculate the great circle distance between two points
- on the earth specified in decimal degrees of latitude and longitude.
- https://en.wikipedia.org/wiki/Haversine_formula
-
- Args:
- lon1: longitude of pt 1,
- lat1: latitude of pt 1,
- lon2: longitude of pt 2,
- lat2: latitude of pt 2
-
- Returns:
- the distace in km between pt1 and pt2
- """
- # convert decimal degrees to radians
- lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
-
- # haversine formula
- dlon = lon2 - lon1
- dlat = lat2 - lat1
- a = (np.sin(dlat / 2)**2 +
- np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2)**2)
- c = 2 * np.arcsin(np.sqrt(a))
-
- # 6367 km is the radius of the Earth
- km = 6367 * c
- return km
-
- def get_total_demand(self):
- """
- Return the total demand of all customers.
- """
- return (sum([c.demand for c in self.customers]))
-
- def return_dist_callback(self, **kwargs):
- """
- Return a callback function for the distance matrix.
-
- Args: **kwargs: Arbitrary keyword arguments passed on to
- make_distance_mat()
-
- Returns:
- function: dist_return(a,b) A function that takes the 'from' node
- index and the 'to' node index and returns the distance in km.
- """
- self.make_distance_mat(**kwargs)
-
- def dist_return(from_index, to_index):
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = self.manager.IndexToNode(from_index)
- to_node = self.manager.IndexToNode(to_index)
- return (self.distmat[from_node][to_node])
-
- return dist_return
-
- def return_dem_callback(self):
- """
- Return a callback function that gives the demands.
-
- Returns:
- function: dem_return(a) A function that takes the 'from' node
- index and returns the distance in km.
- """
-
- def dem_return(from_index):
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = self.manager.IndexToNode(from_index)
- return (self.customers[from_node].demand)
-
- return dem_return
-
- def zero_depot_demands(self, depot):
- """
- Zero out the demands and time windows of depot. The Depots do not have
- demands or time windows so this function clears them.
-
- Args: depot (int): index of the stop to modify into a depot.
- Examples: >>> customers.zero_depot_demands(5) >>>
- customers.customers[5].demand == 0 True
+ def __init__(
+ self,
+ extents=None,
+ center=(53.381393, -1.474611),
+ box_size=10,
+ num_stops=100,
+ min_demand=0,
+ max_demand=25,
+ min_tw=1,
+ max_tw=5,
+ ):
+ self.number = num_stops #: The number of customers and depots
+ #: Location, a named tuple for locations.
+ Location = namedtuple('Location', ['lat', 'lon'])
+ if extents is not None:
+ self.extents = extents #: The lower left and upper right points
+ #: Location[lat,lon]: the centre point of the area.
+ self.center = Location(
+ extents['urcrnrlat']
+ - 0.5 * (extents['urcrnrlat'] - extents['llcrnrlat']),
+ extents['urcrnrlon']
+ - 0.5 * (extents['urcrnrlon'] - extents['llcrnrlon']),
+ )
+ else:
+ #: Location[lat,lon]: the centre point of the area.
+ (clat, clon) = self.center = Location(center[0], center[1])
+ rad_earth = 6367 # km
+ circ_earth = np.pi * rad_earth
+ #: The lower left and upper right points
+ self.extents = {
+ 'llcrnrlon': clon - 180 * box_size / (
+ circ_earth * np.cos(np.deg2rad(clat))
+ ),
+ 'llcrnrlat': clat - 180 * box_size / circ_earth,
+ 'urcrnrlon': clon + 180 * box_size / (
+ circ_earth * np.cos(np.deg2rad(clat))
+ ),
+ 'urcrnrlat': clat + 180 * box_size / circ_earth,
+ }
+ # The 'name' of the stop, indexed from 0 to num_stops-1
+ stops = np.array(range(0, num_stops))
+    # normally distributed random placement of stops within the box
+ stdv = 6 # the number of standard deviations 99.9% will be within +-3
+ lats = (
+ self.extents['llcrnrlat']
+ + np.random.randn(num_stops)
+ * (self.extents['urcrnrlat'] - self.extents['llcrnrlat'])
+ / stdv
+ )
+ lons = (
+ self.extents['llcrnrlon']
+ + np.random.randn(num_stops)
+ * (self.extents['urcrnrlon'] - self.extents['llcrnrlon'])
+ / stdv
+ )
+ # uniformly distributed integer demands.
+ demands = np.random.randint(min_demand, max_demand, num_stops)
+
+ self.time_horizon = 24 * 60**2 # A 24 hour period.
+
+    # The customers demand a min_tw to max_tw hour time window for each
+ # delivery
+ time_windows = np.random.randint(min_tw * 3600, max_tw * 3600, num_stops)
+ # The last time a delivery window can start
+ latest_time = self.time_horizon - time_windows
+ start_times = [None for o in time_windows]
+ stop_times = [None for o in time_windows]
+ # Make random timedeltas, nominally from the start of the day.
+ for idx in range(self.number):
+ stime = int(np.random.randint(0, latest_time[idx]))
+ start_times[idx] = timedelta(seconds=stime)
+ stop_times[idx] = start_times[idx] + timedelta(
+ seconds=int(time_windows[idx])
+ )
+ # A named tuple for the customer
+ Customer = namedtuple(
+ 'Customer',
+ [
+ 'index', # the index of the stop
+ 'demand', # the demand for the stop
+ 'lat', # the latitude of the stop
+ 'lon', # the longitude of the stop
+ 'tw_open', # timedelta window open
+ 'tw_close',
+ ],
+ ) # timedelta window cls
+
+ self.customers = [
+ Customer(idx, dem, lat, lon, tw_open, tw_close)
+ for idx, dem, lat, lon, tw_open, tw_close in zip(
+ stops, demands, lats, lons, start_times, stop_times
+ )
+ ]
+
+ # The number of seconds needed to 'unload' 1 unit of goods.
+ self.service_time_per_dem = 300 # seconds
+
+ def set_manager(self, manager):
+ self.manager = manager
+
+ def central_start_node(self, invert=False):
"""
- start_depot = self.customers[depot]
- self.customers[depot] = start_depot._replace(
- demand=0, tw_open=None, tw_close=None)
+ Return a random starting node, with probability weighted by distance
+ from the centre of the extents, so that a central starting node is
+ likely.
- def make_service_time_call_callback(self):
- """
- Return a callback function that provides the time spent servicing the
- customer. Here is it proportional to the demand given by
- self.service_time_per_dem, default 300 seconds per unit demand.
+ Args: invert (Optional bool): When True, a peripheral starting node is
+ most likely.
- Returns:
- function [dem_return(a, b)]: A function that takes the from/a node
- index and the to/b node index and returns the service time at a
+ Returns:
+ int: a node index.
- """
+ Examples:
+ >>> customers.central_start_node(invert=True)
+ 42
+ """
+ num_nodes = len(self.customers)
+ dist = np.empty((num_nodes, 1))
+ for idx_to in range(num_nodes):
+ dist[idx_to] = self._haversine(
+ self.center.lon,
+ self.center.lat,
+ self.customers[idx_to].lon,
+ self.customers[idx_to].lat,
+ )
+ furthest = np.max(dist)
+
+ if invert:
+ prob = dist * 1.0 / sum(dist)
+ else:
+ prob = (furthest - dist * 1.0) / sum(furthest - dist)
+ indexes = np.array([range(num_nodes)])
+ start_node = np.random.choice(
+ indexes.flatten(), size=1, replace=True, p=prob.flatten()
+ )
+ return start_node[0]
+
+ def make_distance_mat(self, method='haversine'):
+ """
+ Return a distance matrix and make it a member of Customer, using the
+ method given in the call. Currently only Haversine (GC distance) is
+ implemented, but Manhattan, or using a maps API could be added here.
+ Raises an AssertionError for all other methods.
- def service_time_return(a, b):
- return (self.customers[a].demand * self.service_time_per_dem)
+ Args: method (Optional[str]): method of distance calculation to use. The
+ Haversine formula is the only method implemented.
- return service_time_return
+ Returns:
+ Numpy array of node to node distances.
- def make_transit_time_callback(self, speed_kmph=10):
- """
- Creates a callback function for transit time. Assuming an average
- speed of speed_kmph
- Args:
- speed_kmph: the average speed in km/h
+ Examples:
+ >>> dist_mat = customers.make_distance_mat(method='haversine')
+ >>> dist_mat = customers.make_distance_mat(method='manhattan')
+ AssertionError
+ """
+ self.distmat = np.zeros((self.number, self.number))
+ methods = {'haversine': self._haversine}
+ assert method in methods
+ for frm_idx in range(self.number):
+ for to_idx in range(self.number):
+ if frm_idx != to_idx:
+ frm_c = self.customers[frm_idx]
+ to_c = self.customers[to_idx]
+ self.distmat[frm_idx, to_idx] = self._haversine(
+ frm_c.lon, frm_c.lat, to_c.lon, to_c.lat
+ )
+ return self.distmat
+
+ def _haversine(self, lon1, lat1, lon2, lat2):
+ """
+ Calculate the great circle distance between two points
+ on the earth specified in decimal degrees of latitude and longitude.
+ https://en.wikipedia.org/wiki/Haversine_formula
- Returns:
- function [transit_time_return(a, b)]: A function that takes the
- from/a node index and the to/b node index and returns the
- transit time from a to b.
- """
+ Args:
+ lon1: longitude of pt 1,
+ lat1: latitude of pt 1,
+ lon2: longitude of pt 2,
+ lat2: latitude of pt 2
- def transit_time_return(a, b):
- return (self.distmat[a][b] / (speed_kmph * 1.0 / 60**2))
+ Returns:
+      the distance in km between pt1 and pt2
+ """
+ # convert decimal degrees to radians
+ lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
+
+ # haversine formula
+ dlon = lon2 - lon1
+ dlat = lat2 - lat1
+ a = (
+ np.sin(dlat / 2) ** 2
+ + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2
+ )
+ c = 2 * np.arcsin(np.sqrt(a))
+
+ # 6367 km is the radius of the Earth
+ km = 6367 * c
+ return km
+
+ def get_total_demand(self):
+ """
+ Return the total demand of all customers.
+ """
+ return sum([c.demand for c in self.customers])
- return transit_time_return
+ def return_dist_callback(self, **kwargs):
+ """
+ Return a callback function for the distance matrix.
+ Args: **kwargs: Arbitrary keyword arguments passed on to
+ make_distance_mat()
-class Vehicles():
+ Returns:
+ function: dist_return(a,b) A function that takes the 'from' node
+ index and the 'to' node index and returns the distance in km.
"""
- A Class to create and hold vehicle information.
-
- The Vehicles in a CVRPTW problem service the customers and belong to a
- depot. The class Vehicles creates a list of named tuples describing the
- Vehicles. The main characteristics are the vehicle capacity, fixed cost,
- and cost per km. The fixed cost of using a certain type of vehicles can be
- higher or lower than others. If a vehicle is used, i.e. this vehicle serves
- at least one node, then this cost is added to the objective function.
-
- Note:
- If numpy arrays are given for capacity and cost, then they must be of
- the same length, and the number of vehicles are inferred from them.
- If scalars are given, the fleet is homogeneous, and the number of
- vehicles is determined by number.
-
- Args: capacity (scalar or numpy array): The integer capacity of demand
- units. cost (scalar or numpy array): The fixed cost of the vehicle. number
- (Optional [int]): The number of vehicles in a homogeneous fleet.
- """
+ self.make_distance_mat(**kwargs)
- def __init__(self, capacity=100, cost=100, number=None):
-
- Vehicle = namedtuple('Vehicle', ['index', 'capacity', 'cost'])
-
- if number is None:
- self.number = np.size(capacity)
- else:
- self.number = number
- idxs = np.array(range(0, self.number))
-
- if np.isscalar(capacity):
- capacities = capacity * np.ones_like(idxs)
- elif np.size(capacity) != self.number:
- print('capacity is neither scalar, nor the same size as num!')
- else:
- capacities = capacity
-
- if np.isscalar(cost):
- costs = cost * np.ones_like(idxs)
- elif np.size(cost) != self.number:
- print(np.size(cost))
- print('cost is neither scalar, nor the same size as num!')
- else:
- costs = cost
-
- self.vehicles = [
- Vehicle(idx, capacity, cost)
- for idx, capacity, cost in zip(idxs, capacities, costs)
- ]
-
- def get_total_capacity(self):
- return (sum([c.capacity for c in self.vehicles]))
-
- def return_starting_callback(self, customers, sameStartFinish=False):
- # create a different starting and finishing depot for each vehicle
- self.starts = [
- int(customers.central_start_node()) for o in range(self.number)
- ]
- if sameStartFinish:
- self.ends = self.starts
- else:
- self.ends = [
- int(customers.central_start_node(invert=True))
- for o in range(self.number)
- ]
- # the depots will not have demands, so zero them.
- for depot in self.starts:
- customers.zero_depot_demands(depot)
- for depot in self.ends:
- customers.zero_depot_demands(depot)
-
- def start_return(v):
- return (self.starts[v])
-
- return start_return
+ def dist_return(from_index, to_index):
+ # Convert from routing variable Index to distance matrix NodeIndex.
+ from_node = self.manager.IndexToNode(from_index)
+ to_node = self.manager.IndexToNode(to_index)
+ return self.distmat[from_node][to_node]
+ return dist_return
-def discrete_cmap(N, base_cmap=None):
+ def return_dem_callback(self):
"""
- Create an N-bin discrete colormap from the specified input map
+ Return a callback function that gives the demands.
+
+ Returns:
+ function: dem_return(a) A function that takes the 'from' node
+        index and returns the demand.
"""
- # Note that if base_cmap is a string or None, you can simply do
- # return plt.cm.get_cmap(base_cmap, N)
- # The following works for string, None, or a colormap instance:
- base = plt.cm.get_cmap(base_cmap)
- color_list = base(np.linspace(0, 1, N))
- cmap_name = base.name + str(N)
- return base.from_list(cmap_name, color_list, N)
+ def dem_return(from_index):
+ # Convert from routing variable Index to distance matrix NodeIndex.
+ from_node = self.manager.IndexToNode(from_index)
+ return self.customers[from_node].demand
+ return dem_return
-def vehicle_output_string(manager, routing, plan):
+ def zero_depot_demands(self, depot):
+ """
+ Zero out the demands and time windows of depot. The Depots do not have
+ demands or time windows so this function clears them.
+
+ Args: depot (int): index of the stop to modify into a depot.
+ Examples: >>> customers.zero_depot_demands(5) >>>
+ customers.customers[5].demand == 0 True
"""
- Return a string displaying the output of the routing instance and
- assignment (plan).
+ start_depot = self.customers[depot]
+ self.customers[depot] = start_depot._replace(
+ demand=0, tw_open=None, tw_close=None
+ )
- Args: routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
- plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
+ def make_service_time_call_callback(self):
+ """
+ Return a callback function that provides the time spent servicing the
+    customer. Here it is proportional to the demand given by
+ self.service_time_per_dem, default 300 seconds per unit demand.
Returns:
- (string) plan_output: describing each vehicle's plan.
-
- (List) dropped: list of dropped orders.
+ function [dem_return(a, b)]: A function that takes the from/a node
+ index and the to/b node index and returns the service time at a
"""
- dropped = []
- for order in range(routing.Size()):
- if (plan.Value(routing.NextVar(order)) == order):
- dropped.append(str(order))
-
- capacity_dimension = routing.GetDimensionOrDie('Capacity')
- time_dimension = routing.GetDimensionOrDie('Time')
- plan_output = ''
-
- for route_number in range(routing.vehicles()):
- order = routing.Start(route_number)
- plan_output += 'Route {0}:'.format(route_number)
- if routing.IsEnd(plan.Value(routing.NextVar(order))):
- plan_output += ' Empty \n'
- else:
- while True:
- load_var = capacity_dimension.CumulVar(order)
- time_var = time_dimension.CumulVar(order)
- node = manager.IndexToNode(order)
- plan_output += \
- ' {node} Load({load}) Time({tmin}, {tmax}) -> '.format(
- node=node,
- load=plan.Value(load_var),
- tmin=str(timedelta(seconds=plan.Min(time_var))),
- tmax=str(timedelta(seconds=plan.Max(time_var))))
-
- if routing.IsEnd(order):
- plan_output += ' EndRoute {0}. \n'.format(route_number)
- break
- order = plan.Value(routing.NextVar(order))
- plan_output += '\n'
-
- return (plan_output, dropped)
+ def service_time_return(a, b):
+ return self.customers[a].demand * self.service_time_per_dem
-def build_vehicle_route(manager, routing, plan, customers, veh_number):
- """
- Build a route for a vehicle by starting at the strat node and
- continuing to the end node.
+ return service_time_return
- Args: routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
- plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
- customers (Customers): the customers instance. veh_number (int): index of
- the vehicle
+ def make_transit_time_callback(self, speed_kmph=10):
+ """
+ Creates a callback function for transit time. Assuming an average
+ speed of speed_kmph
+ Args:
+ speed_kmph: the average speed in km/h
Returns:
- (List) route: indexes of the customers for vehicle veh_number
+ function [transit_time_return(a, b)]: A function that takes the
+ from/a node index and the to/b node index and returns the
+ transit time from a to b.
"""
- veh_used = routing.IsVehicleUsed(plan, veh_number)
- print('Vehicle {0} is used {1}'.format(veh_number, veh_used))
- if veh_used:
- route = []
- node = routing.Start(veh_number) # Get the starting node index
- route.append(customers.customers[manager.IndexToNode(node)])
- while not routing.IsEnd(node):
- route.append(customers.customers[manager.IndexToNode(node)])
- node = plan.Value(routing.NextVar(node))
-
- route.append(customers.customers[manager.IndexToNode(node)])
- return route
+
+ def transit_time_return(a, b):
+ return self.distmat[a][b] / (speed_kmph * 1.0 / 60**2)
+
+ return transit_time_return
+
+
+class Vehicles:
+ """
+ A Class to create and hold vehicle information.
+
+ The Vehicles in a CVRPTW problem service the customers and belong to a
+ depot. The class Vehicles creates a list of named tuples describing the
+ Vehicles. The main characteristics are the vehicle capacity, fixed cost,
+ and cost per km. The fixed cost of using a certain type of vehicles can be
+ higher or lower than others. If a vehicle is used, i.e. this vehicle serves
+ at least one node, then this cost is added to the objective function.
+
+ Note:
+ If numpy arrays are given for capacity and cost, then they must be of
+ the same length, and the number of vehicles are inferred from them.
+ If scalars are given, the fleet is homogeneous, and the number of
+ vehicles is determined by number.
+
+ Args: capacity (scalar or numpy array): The integer capacity of demand
+ units. cost (scalar or numpy array): The fixed cost of the vehicle. number
+ (Optional [int]): The number of vehicles in a homogeneous fleet.
+ """
+
+ def __init__(self, capacity=100, cost=100, number=None):
+
+ Vehicle = namedtuple('Vehicle', ['index', 'capacity', 'cost'])
+
+ if number is None:
+ self.number = np.size(capacity)
else:
- return None
+ self.number = number
+ idxs = np.array(range(0, self.number))
+ if np.isscalar(capacity):
+ capacities = capacity * np.ones_like(idxs)
+ elif np.size(capacity) != self.number:
+ print('capacity is neither scalar, nor the same size as num!')
+ else:
+ capacities = capacity
-def plot_vehicle_routes(veh_route, ax1, customers, vehicles):
- """
- Plot the vehicle routes on matplotlib axis ax1.
+ if np.isscalar(cost):
+ costs = cost * np.ones_like(idxs)
+ elif np.size(cost) != self.number:
+ print(np.size(cost))
+ print('cost is neither scalar, nor the same size as num!')
+ else:
+ costs = cost
+
+ self.vehicles = [
+ Vehicle(idx, capacity, cost)
+ for idx, capacity, cost in zip(idxs, capacities, costs)
+ ]
+
+ def get_total_capacity(self):
+ return sum([c.capacity for c in self.vehicles])
+
+ def return_starting_callback(self, customers, sameStartFinish=False):
+ # create a different starting and finishing depot for each vehicle
+ self.starts = [
+ int(customers.central_start_node()) for o in range(self.number)
+ ]
+ if sameStartFinish:
+ self.ends = self.starts
+ else:
+ self.ends = [
+ int(customers.central_start_node(invert=True))
+ for o in range(self.number)
+ ]
+ # the depots will not have demands, so zero them.
+ for depot in self.starts:
+ customers.zero_depot_demands(depot)
+ for depot in self.ends:
+ customers.zero_depot_demands(depot)
+
+ def start_return(v):
+ return self.starts[v]
- Args: veh_route (dict): a dictionary of routes keyed by vehicle idx. ax1
- (matplotlib.axes._subplots.AxesSubplot): Matplotlib axes customers
- (Customers): the customers instance. vehicles (Vehicles): the vehicles
- instance.
+ return start_return
+
+
+def discrete_cmap(N, base_cmap=None):
"""
- veh_used = [v for v in veh_route if veh_route[v] is not None]
-
- cmap = discrete_cmap(vehicles.number + 2, 'nipy_spectral')
-
- for veh_number in veh_used:
-
- lats, lons = zip(*[(c.lat, c.lon) for c in veh_route[veh_number]])
- lats = np.array(lats)
- lons = np.array(lons)
- s_dep = customers.customers[vehicles.starts[veh_number]]
- s_fin = customers.customers[vehicles.ends[veh_number]]
- ax1.annotate(
- 'v({veh}) S @ {node}'.format(
- veh=veh_number, node=vehicles.starts[veh_number]),
- xy=(s_dep.lon, s_dep.lat),
- xytext=(10, 10),
- xycoords='data',
- textcoords='offset points',
- arrowprops=dict(
- arrowstyle='->',
- connectionstyle='angle3,angleA=90,angleB=0',
- shrinkA=0.05),
- )
- ax1.annotate(
- 'v({veh}) F @ {node}'.format(
- veh=veh_number, node=vehicles.ends[veh_number]),
- xy=(s_fin.lon, s_fin.lat),
- xytext=(10, -20),
- xycoords='data',
- textcoords='offset points',
- arrowprops=dict(
- arrowstyle='->',
- connectionstyle='angle3,angleA=-90,angleB=0',
- shrinkA=0.05),
+ Create an N-bin discrete colormap from the specified input map
+ """
+ # Note that if base_cmap is a string or None, you can simply do
+ # return plt.cm.get_cmap(base_cmap, N)
+ # The following works for string, None, or a colormap instance:
+
+ base = plt.cm.get_cmap(base_cmap)
+ color_list = base(np.linspace(0, 1, N))
+ cmap_name = base.name + str(N)
+ return base.from_list(cmap_name, color_list, N)
+
+
+def vehicle_output_string(manager, routing, plan):
+ """
+ Return a string displaying the output of the routing instance and
+ assignment (plan).
+
+ Args: routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
+ plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
+
+ Returns:
+ (string) plan_output: describing each vehicle's plan.
+
+ (List) dropped: list of dropped orders.
+
+ """
+ dropped = []
+ for order in range(routing.Size()):
+ if plan.Value(routing.NextVar(order)) == order:
+ dropped.append(str(order))
+
+ capacity_dimension = routing.GetDimensionOrDie('Capacity')
+ time_dimension = routing.GetDimensionOrDie('Time')
+ plan_output = ''
+
+ for route_number in range(routing.vehicles()):
+ order = routing.Start(route_number)
+ plan_output += 'Route {0}:'.format(route_number)
+ if routing.IsEnd(plan.Value(routing.NextVar(order))):
+ plan_output += ' Empty \n'
+ else:
+ while True:
+ load_var = capacity_dimension.CumulVar(order)
+ time_var = time_dimension.CumulVar(order)
+ node = manager.IndexToNode(order)
+ plan_output += ' {node} Load({load}) Time({tmin}, {tmax}) -> '.format(
+ node=node,
+ load=plan.Value(load_var),
+ tmin=str(timedelta(seconds=plan.Min(time_var))),
+ tmax=str(timedelta(seconds=plan.Max(time_var))),
)
- ax1.plot(lons, lats, 'o', mfc=cmap(veh_number + 1))
- ax1.quiver(
- lons[:-1],
- lats[:-1],
- lons[1:] - lons[:-1],
- lats[1:] - lats[:-1],
- scale_units='xy',
- angles='xy',
- scale=1,
- color=cmap(veh_number + 1))
+
+ if routing.IsEnd(order):
+ plan_output += ' EndRoute {0}. \n'.format(route_number)
+ break
+ order = plan.Value(routing.NextVar(order))
+ plan_output += '\n'
+
+ return (plan_output, dropped)
+
+
+def build_vehicle_route(manager, routing, plan, customers, veh_number):
+ """
+  Build a route for a vehicle by starting at the start node and
+ continuing to the end node.
+
+ Args: routing (ortools.constraint_solver.pywrapcp.RoutingModel): routing.
+ plan (ortools.constraint_solver.pywrapcp.Assignment): the assignment.
+ customers (Customers): the customers instance. veh_number (int): index of
+ the vehicle
+
+ Returns:
+ (List) route: indexes of the customers for vehicle veh_number
+ """
+ veh_used = routing.IsVehicleUsed(plan, veh_number)
+ print('Vehicle {0} is used {1}'.format(veh_number, veh_used))
+ if veh_used:
+ route = []
+ node = routing.Start(veh_number) # Get the starting node index
+ route.append(customers.customers[manager.IndexToNode(node)])
+ while not routing.IsEnd(node):
+ route.append(customers.customers[manager.IndexToNode(node)])
+ node = plan.Value(routing.NextVar(node))
+
+ route.append(customers.customers[manager.IndexToNode(node)])
+ return route
+ else:
+ return None
+
+
+def plot_vehicle_routes(veh_route, ax1, customers, vehicles):
+ """
+ Plot the vehicle routes on matplotlib axis ax1.
+
+ Args: veh_route (dict): a dictionary of routes keyed by vehicle idx. ax1
+ (matplotlib.axes._subplots.AxesSubplot): Matplotlib axes customers
+ (Customers): the customers instance. vehicles (Vehicles): the vehicles
+ instance.
+ """
+ veh_used = [v for v in veh_route if veh_route[v] is not None]
+
+ cmap = discrete_cmap(vehicles.number + 2, 'nipy_spectral')
+
+ for veh_number in veh_used:
+
+ lats, lons = zip(*[(c.lat, c.lon) for c in veh_route[veh_number]])
+ lats = np.array(lats)
+ lons = np.array(lons)
+ s_dep = customers.customers[vehicles.starts[veh_number]]
+ s_fin = customers.customers[vehicles.ends[veh_number]]
+ ax1.annotate(
+ 'v({veh}) S @ {node}'.format(
+ veh=veh_number, node=vehicles.starts[veh_number]
+ ),
+ xy=(s_dep.lon, s_dep.lat),
+ xytext=(10, 10),
+ xycoords='data',
+ textcoords='offset points',
+ arrowprops=dict(
+ arrowstyle='->',
+ connectionstyle='angle3,angleA=90,angleB=0',
+ shrinkA=0.05,
+ ),
+ )
+ ax1.annotate(
+ 'v({veh}) F @ {node}'.format(
+ veh=veh_number, node=vehicles.ends[veh_number]
+ ),
+ xy=(s_fin.lon, s_fin.lat),
+ xytext=(10, -20),
+ xycoords='data',
+ textcoords='offset points',
+ arrowprops=dict(
+ arrowstyle='->',
+ connectionstyle='angle3,angleA=-90,angleB=0',
+ shrinkA=0.05,
+ ),
+ )
+ ax1.plot(lons, lats, 'o', mfc=cmap(veh_number + 1))
+ ax1.quiver(
+ lons[:-1],
+ lats[:-1],
+ lons[1:] - lons[:-1],
+ lats[1:] - lats[:-1],
+ scale_units='xy',
+ angles='xy',
+ scale=1,
+ color=cmap(veh_number + 1),
+ )
def main():
- # Create a set of customer, (and depot) stops.
- customers = Customers(
- num_stops=50,
- min_demand=1,
- max_demand=15,
- box_size=40,
- min_tw=3,
- max_tw=6)
-
- # Create a list of inhomgenious vehicle capacities as integer units.
- capacity = [50, 75, 100, 125, 150, 175, 200, 250]
-
- # Create a list of inhomogeneous fixed vehicle costs.
- cost = [int(100 + 2 * np.sqrt(c)) for c in capacity]
-
- # Create a set of vehicles, the number set by the length of capacity.
- vehicles = Vehicles(capacity=capacity, cost=cost)
-
- # check to see that the problem is feasible, if we don't have enough
- # vehicles to cover the demand, there is no point in going further.
- assert (customers.get_total_demand() < vehicles.get_total_capacity())
-
- # Set the starting nodes, and create a callback fn for the starting node.
- start_fn = vehicles.return_starting_callback(
- customers, sameStartFinish=False)
-
- # Create the routing index manager.
- manager = pywrapcp.RoutingIndexManager(
- customers.number, # int number
- vehicles.number, # int number
- vehicles.starts, # List of int start depot
- vehicles.ends) # List of int end depot
-
- customers.set_manager(manager)
-
- # Set model parameters
- model_parameters = pywrapcp.DefaultRoutingModelParameters()
-
- # The solver parameters can be accessed from the model parameters. For example :
- # model_parameters.solver_parameters.CopyFrom(
- # pywrapcp.Solver.DefaultSolverParameters())
- # model_parameters.solver_parameters.trace_propagation = True
-
- # Make the routing model instance.
- routing = pywrapcp.RoutingModel(manager, model_parameters)
-
- parameters = pywrapcp.DefaultRoutingSearchParameters()
- # Setting first solution heuristic (cheapest addition).
- parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
- # Routing: forbids use of TSPOpt neighborhood, (this is the default behaviour)
- parameters.local_search_operators.use_tsp_opt = pywrapcp.BOOL_FALSE
- # Disabling Large Neighborhood Search, (this is the default behaviour)
- parameters.local_search_operators.use_path_lns = pywrapcp.BOOL_FALSE
- parameters.local_search_operators.use_inactive_lns = pywrapcp.BOOL_FALSE
-
- parameters.time_limit.seconds = 10
- parameters.use_full_propagation = True
- #parameters.log_search = True
-
- # Create callback fns for distances, demands, service and transit-times.
- dist_fn = customers.return_dist_callback()
- dist_fn_index = routing.RegisterTransitCallback(dist_fn)
-
- dem_fn = customers.return_dem_callback()
- dem_fn_index = routing.RegisterUnaryTransitCallback(dem_fn)
-
- # Create and register a transit callback.
- serv_time_fn = customers.make_service_time_call_callback()
- transit_time_fn = customers.make_transit_time_callback()
- def tot_time_fn(from_index, to_index):
- """
- The time function we want is both transit time and service time.
- """
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return serv_time_fn(from_node, to_node) + transit_time_fn(from_node, to_node)
-
- tot_time_fn_index = routing.RegisterTransitCallback(tot_time_fn)
-
- # Set the cost function (distance callback) for each arc, homogeneous for
- # all vehicles.
- routing.SetArcCostEvaluatorOfAllVehicles(dist_fn_index)
-
- # Set vehicle costs for each vehicle, not homogeneous.
- for veh in vehicles.vehicles:
- routing.SetFixedCostOfVehicle(veh.cost, int(veh.index))
-
- # Add a dimension for vehicle capacities
- null_capacity_slack = 0
- routing.AddDimensionWithVehicleCapacity(
- dem_fn_index, # demand callback
- null_capacity_slack,
- capacity, # capacity array
- True,
- 'Capacity')
- # Add a dimension for time and a limit on the total time_horizon
- routing.AddDimension(
- tot_time_fn_index, # total time function callback
- customers.time_horizon,
- customers.time_horizon,
- True,
- 'Time')
-
- time_dimension = routing.GetDimensionOrDie('Time')
- for cust in customers.customers:
- if cust.tw_open is not None:
- time_dimension.CumulVar(manager.NodeToIndex(cust.index)).SetRange(
- cust.tw_open.seconds, cust.tw_close.seconds)
+ # Create a set of customer (and depot) stops.
+ customers = Customers(
+ num_stops=50, min_demand=1, max_demand=15, box_size=40, min_tw=3, max_tw=6
+ )
+
+ # Create a list of inhomogeneous vehicle capacities as integer units.
+ capacity = [50, 75, 100, 125, 150, 175, 200, 250]
+
+ # Create a list of inhomogeneous fixed vehicle costs.
+ cost = [int(100 + 2 * np.sqrt(c)) for c in capacity]
+
+ # Create a set of vehicles, the number set by the length of capacity.
+ vehicles = Vehicles(capacity=capacity, cost=cost)
+
+ # Check that the problem is feasible: if we don't have enough vehicle
+ # capacity to cover the demand, there is no point in going further.
+ assert customers.get_total_demand() < vehicles.get_total_capacity()
+
+ # Set the starting nodes, and create a callback fn for the starting node.
+ start_fn = vehicles.return_starting_callback(customers, sameStartFinish=False)
+
+ # Create the routing index manager.
+ manager = pywrapcp.RoutingIndexManager(
+ customers.number, # int number
+ vehicles.number, # int number
+ vehicles.starts, # List of int start depot
+ vehicles.ends, # List of int end depot
+ )
+
+ customers.set_manager(manager)
+
+ # Set model parameters
+ model_parameters = pywrapcp.DefaultRoutingModelParameters()
+
+ # The solver parameters can be accessed from the model parameters. For example:
+ # model_parameters.solver_parameters.CopyFrom(
+ # pywrapcp.Solver.DefaultSolverParameters())
+ # model_parameters.solver_parameters.trace_propagation = True
+
+ # Make the routing model instance.
+ routing = pywrapcp.RoutingModel(manager, model_parameters)
+
+ parameters = pywrapcp.DefaultRoutingSearchParameters()
+ # Setting first solution heuristic (cheapest addition).
+ parameters.first_solution_strategy = (
+ enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
+ )
+ # Routing: forbid use of the TSPOpt neighborhood (this is the default behaviour).
+ parameters.local_search_operators.use_tsp_opt = pywrapcp.BOOL_FALSE
+ # Disable Large Neighborhood Search (this is the default behaviour).
+ parameters.local_search_operators.use_path_lns = pywrapcp.BOOL_FALSE
+ parameters.local_search_operators.use_inactive_lns = pywrapcp.BOOL_FALSE
+
+ parameters.time_limit.seconds = 10
+ parameters.use_full_propagation = True
+ # parameters.log_search = True
+
+ # Create callback fns for distances, demands, service and transit-times.
+ dist_fn = customers.return_dist_callback()
+ dist_fn_index = routing.RegisterTransitCallback(dist_fn)
+
+ dem_fn = customers.return_dem_callback()
+ dem_fn_index = routing.RegisterUnaryTransitCallback(dem_fn)
+
+ # Create and register a transit callback.
+ serv_time_fn = customers.make_service_time_call_callback()
+ transit_time_fn = customers.make_transit_time_callback()
+
+ def tot_time_fn(from_index, to_index):
+ """
+ The time function we want is both transit time and service time.
"""
+ # Convert from routing variable Index to distance matrix NodeIndex.
+ from_node = manager.IndexToNode(from_index)
+ to_node = manager.IndexToNode(to_index)
+ return serv_time_fn(from_node, to_node) + transit_time_fn(
+ from_node, to_node
+ )
+
+ tot_time_fn_index = routing.RegisterTransitCallback(tot_time_fn)
+
+ # Set the cost function (distance callback) for each arc, homogeneous for
+ # all vehicles.
+ routing.SetArcCostEvaluatorOfAllVehicles(dist_fn_index)
+
+ # Set vehicle costs for each vehicle, not homogeneous.
+ for veh in vehicles.vehicles:
+ routing.SetFixedCostOfVehicle(veh.cost, int(veh.index))
+
+ # Add a dimension for vehicle capacities
+ null_capacity_slack = 0
+ routing.AddDimensionWithVehicleCapacity(
+ dem_fn_index, # demand callback
+ null_capacity_slack,
+ capacity, # capacity array
+ True,
+ 'Capacity',
+ )
+ # Add a dimension for time and a limit on the total time_horizon
+ routing.AddDimension(
+ tot_time_fn_index, # total time function callback
+ customers.time_horizon,
+ customers.time_horizon,
+ True,
+ 'Time',
+ )
+
+ time_dimension = routing.GetDimensionOrDie('Time')
+ for cust in customers.customers:
+ if cust.tw_open is not None:
+ time_dimension.CumulVar(manager.NodeToIndex(cust.index)).SetRange(
+ cust.tw_open.seconds, cust.tw_close.seconds
+ )
+ """
To allow the dropping of orders, we add disjunctions to all the customer
nodes. Each disjunction is a list of 1 index, which allows that customer to
be active or not, with a penalty if not. The penalty should be larger
than the cost of servicing that customer, or it will always be dropped!
"""
- # To add disjunctions just to the customers, make a list of non-depots.
- non_depot = set(range(customers.number))
- non_depot.difference_update(vehicles.starts)
- non_depot.difference_update(vehicles.ends)
- penalty = 400000 # The cost for dropping a node from the plan.
- nodes = [routing.AddDisjunction([manager.NodeToIndex(c)], penalty) for c in non_depot]
-
- # This is how you would implement partial routes if you already knew part
- # of a feasible solution for example:
- # partial = np.random.choice(list(non_depot), size=(4,5), replace=False)
-
- # routing.CloseModel()
- # partial_list = [partial[0,:].tolist(),
- # partial[1,:].tolist(),
- # partial[2,:].tolist(),
- # partial[3,:].tolist(),
- # [],[],[],[]]
- # print(routing.ApplyLocksToAllVehicles(partial_list, False))
-
- # Solve the problem !
- assignment = routing.SolveWithParameters(parameters)
-
- # The rest is all optional for saving, printing or plotting the solution.
- if assignment:
- ## save the assignment, (Google Protobuf format)
- #save_file_base = os.path.realpath(__file__).split('.')[0]
- #if routing.WriteAssignment(save_file_base + '_assignment.ass'):
- # print('succesfully wrote assignment to file ' + save_file_base +
- # '_assignment.ass')
-
- print('The Objective Value is {0}'.format(assignment.ObjectiveValue()))
-
- plan_output, dropped = vehicle_output_string(manager, routing, assignment)
- print(plan_output)
- print('dropped nodes: ' + ', '.join(dropped))
-
- # you could print debug information like this:
- # print(routing.DebugOutputAssignment(assignment, 'Capacity'))
-
- vehicle_routes = {}
- for veh in range(vehicles.number):
- vehicle_routes[veh] = build_vehicle_route(manager, routing, assignment,
- customers, veh)
-
- # Plotting of the routes in matplotlib.
- fig = plt.figure()
- ax = fig.add_subplot(111)
- # Plot all the nodes as black dots.
- clon, clat = zip(*[(c.lon, c.lat) for c in customers.customers])
- ax.plot(clon, clat, 'k.')
- # plot the routes as arrows
- plot_vehicle_routes(vehicle_routes, ax, customers, vehicles)
- plt.show()
-
- else:
- print('No assignment')
+ # To add disjunctions just to the customers, make a list of non-depots.
+ non_depot = set(range(customers.number))
+ non_depot.difference_update(vehicles.starts)
+ non_depot.difference_update(vehicles.ends)
+ penalty = 400000 # The cost for dropping a node from the plan.
+ nodes = [
+ routing.AddDisjunction([manager.NodeToIndex(c)], penalty)
+ for c in non_depot
+ ]
+
+ # This is how you would implement partial routes if you already knew part
+ # of a feasible solution for example:
+ # partial = np.random.choice(list(non_depot), size=(4,5), replace=False)
+
+ # routing.CloseModel()
+ # partial_list = [partial[0,:].tolist(),
+ # partial[1,:].tolist(),
+ # partial[2,:].tolist(),
+ # partial[3,:].tolist(),
+ # [],[],[],[]]
+ # print(routing.ApplyLocksToAllVehicles(partial_list, False))
+
+ # Solve the problem!
+ assignment = routing.SolveWithParameters(parameters)
+
+ # The rest is all optional for saving, printing or plotting the solution.
+ if assignment:
+ ## save the assignment, (Google Protobuf format)
+ # save_file_base = os.path.realpath(__file__).split('.')[0]
+ # if routing.WriteAssignment(save_file_base + '_assignment.ass'):
+ # print('successfully wrote assignment to file ' + save_file_base +
+ # '_assignment.ass')
+
+ print('The Objective Value is {0}'.format(assignment.ObjectiveValue()))
+
+ plan_output, dropped = vehicle_output_string(manager, routing, assignment)
+ print(plan_output)
+ print('dropped nodes: ' + ', '.join(dropped))
+
+ # you could print debug information like this:
+ # print(routing.DebugOutputAssignment(assignment, 'Capacity'))
+
+ vehicle_routes = {}
+ for veh in range(vehicles.number):
+ vehicle_routes[veh] = build_vehicle_route(
+ manager, routing, assignment, customers, veh
+ )
+
+ # Plotting of the routes in matplotlib.
+ fig = plt.figure()
+ ax = fig.add_subplot(111)
+ # Plot all the nodes as black dots.
+ clon, clat = zip(*[(c.lon, c.lat) for c in customers.customers])
+ ax.plot(clon, clat, 'k.')
+ # plot the routes as arrows
+ plot_vehicle_routes(vehicle_routes, ax, customers, vehicles)
+ plt.show()
+
+ else:
+ print('No assignment')
if __name__ == '__main__':
- main()
+ main()
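The disjunction pattern above (one-node disjunctions with a penalty, so customers may be dropped) can be tried in isolation. Below is a minimal sketch on a made-up 4-node instance; the distance matrix and penalty are illustrative only, and it uses the same pywrapcp module this example imports (the enums module may instead be imported as enums_pb2 after the routing package split shown elsewhere in this patch).

    from ortools.constraint_solver import pywrapcp, routing_enums_pb2

    # Hypothetical 4-node instance; node 0 is the depot.
    dist = [
        [0, 2, 9, 10],
        [2, 0, 6, 4],
        [9, 6, 0, 3],
        [10, 4, 3, 0],
    ]

    manager = pywrapcp.RoutingIndexManager(len(dist), 1, 0)
    routing = pywrapcp.RoutingModel(manager)

    def transit(from_index, to_index):
        # Convert routing variable indices back to matrix nodes.
        return dist[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]

    routing.SetArcCostEvaluatorOfAllVehicles(routing.RegisterTransitCallback(transit))

    # Each non-depot node may be dropped, at a fixed penalty added to the objective.
    penalty = 1000
    for node in range(1, len(dist)):
        routing.AddDisjunction([manager.NodeToIndex(node)], penalty)

    parameters = pywrapcp.DefaultRoutingSearchParameters()
    parameters.first_solution_strategy = (
        routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
    assignment = routing.SolveWithParameters(parameters)
    if assignment:
        print('objective:', assignment.ObjectiveValue())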
diff --git a/examples/python/flexible_job_shop_sat.py b/examples/python/flexible_job_shop_sat.py
index aa617420c4c..106aff453b8 100644
--- a/examples/python/flexible_job_shop_sat.py
+++ b/examples/python/flexible_job_shop_sat.py
@@ -31,175 +31,175 @@
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
+ """Print intermediate solutions."""
- def __init__(self) -> None:
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
+ def __init__(self) -> None:
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
- def on_solution_callback(self) -> None:
- """Called at each new solution."""
- print(
- f"Solution {self.__solution_count}, time = {self.wall_time} s,"
- f" objective = {self.objective_value}"
- )
- self.__solution_count += 1
+ def on_solution_callback(self) -> None:
+ """Called at each new solution."""
+ print(
+ f"Solution {self.__solution_count}, time = {self.wall_time} s,"
+ f" objective = {self.objective_value}"
+ )
+ self.__solution_count += 1
def flexible_jobshop() -> None:
- """solve a small flexible jobshop problem."""
- # Data part.
- jobs = [ # task = (processing_time, machine_id)
- [ # Job 0
- [(3, 0), (1, 1), (5, 2)], # task 0 with 3 alternatives
- [(2, 0), (4, 1), (6, 2)], # task 1 with 3 alternatives
- [(2, 0), (3, 1), (1, 2)], # task 2 with 3 alternatives
- ],
- [ # Job 1
- [(2, 0), (3, 1), (4, 2)],
- [(1, 0), (5, 1), (4, 2)],
- [(2, 0), (1, 1), (4, 2)],
- ],
- [ # Job 2
- [(2, 0), (1, 1), (4, 2)],
- [(2, 0), (3, 1), (4, 2)],
- [(3, 0), (1, 1), (5, 2)],
- ],
- ]
-
- num_jobs = len(jobs)
- all_jobs = range(num_jobs)
-
- num_machines = 3
- all_machines = range(num_machines)
-
- # Model the flexible jobshop problem.
- model = cp_model.CpModel()
-
- horizon = 0
- for job in jobs:
- for task in job:
- max_task_duration = 0
- for alternative in task:
- max_task_duration = max(max_task_duration, alternative[0])
- horizon += max_task_duration
-
- print(f"Horizon = {horizon}")
-
- # Global storage of variables.
- intervals_per_resources = collections.defaultdict(list)
- starts = {} # indexed by (job_id, task_id).
- presences = {} # indexed by (job_id, task_id, alt_id).
- job_ends: list[cp_model.IntVar] = []
-
- # Scan the jobs and create the relevant variables and intervals.
+ """solve a small flexible jobshop problem."""
+ # Data part.
+ jobs = [ # task = (processing_time, machine_id)
+ [ # Job 0
+ [(3, 0), (1, 1), (5, 2)], # task 0 with 3 alternatives
+ [(2, 0), (4, 1), (6, 2)], # task 1 with 3 alternatives
+ [(2, 0), (3, 1), (1, 2)], # task 2 with 3 alternatives
+ ],
+ [ # Job 1
+ [(2, 0), (3, 1), (4, 2)],
+ [(1, 0), (5, 1), (4, 2)],
+ [(2, 0), (1, 1), (4, 2)],
+ ],
+ [ # Job 2
+ [(2, 0), (1, 1), (4, 2)],
+ [(2, 0), (3, 1), (4, 2)],
+ [(3, 0), (1, 1), (5, 2)],
+ ],
+ ]
+
+ num_jobs = len(jobs)
+ all_jobs = range(num_jobs)
+
+ num_machines = 3
+ all_machines = range(num_machines)
+
+ # Model the flexible jobshop problem.
+ model = cp_model.CpModel()
+
+ horizon = 0
+ for job in jobs:
+ for task in job:
+ max_task_duration = 0
+ for alternative in task:
+ max_task_duration = max(max_task_duration, alternative[0])
+ horizon += max_task_duration
+
+ print(f"Horizon = {horizon}")
+
+ # Global storage of variables.
+ intervals_per_resources = collections.defaultdict(list)
+ starts = {} # indexed by (job_id, task_id).
+ presences = {} # indexed by (job_id, task_id, alt_id).
+ job_ends: list[cp_model.IntVar] = []
+
+ # Scan the jobs and create the relevant variables and intervals.
+ for job_id in all_jobs:
+ job = jobs[job_id]
+ num_tasks = len(job)
+ previous_end = None
+ for task_id in range(num_tasks):
+ task = job[task_id]
+
+ min_duration = task[0][0]
+ max_duration = task[0][0]
+
+ num_alternatives = len(task)
+ all_alternatives = range(num_alternatives)
+
+ for alt_id in range(1, num_alternatives):
+ alt_duration = task[alt_id][0]
+ min_duration = min(min_duration, alt_duration)
+ max_duration = max(max_duration, alt_duration)
+
+ # Create main interval for the task.
+ suffix_name = f"_j{job_id}_t{task_id}"
+ start = model.new_int_var(0, horizon, "start" + suffix_name)
+ duration = model.new_int_var(
+ min_duration, max_duration, "duration" + suffix_name
+ )
+ end = model.new_int_var(0, horizon, "end" + suffix_name)
+ interval = model.new_interval_var(
+ start, duration, end, "interval" + suffix_name
+ )
+
+ # Store the start for the solution.
+ starts[(job_id, task_id)] = start
+
+ # Add precedence with previous task in the same job.
+ if previous_end is not None:
+ model.add(start >= previous_end)
+ previous_end = end
+
+ # Create alternative intervals.
+ if num_alternatives > 1:
+ l_presences = []
+ for alt_id in all_alternatives:
+ alt_suffix = f"_j{job_id}_t{task_id}_a{alt_id}"
+ l_presence = model.new_bool_var("presence" + alt_suffix)
+ l_start = model.new_int_var(0, horizon, "start" + alt_suffix)
+ l_duration = task[alt_id][0]
+ l_end = model.new_int_var(0, horizon, "end" + alt_suffix)
+ l_interval = model.new_optional_interval_var(
+ l_start, l_duration, l_end, l_presence, "interval" + alt_suffix
+ )
+ l_presences.append(l_presence)
+
+ # Link the primary/global variables with the local ones.
+ model.add(start == l_start).only_enforce_if(l_presence)
+ model.add(duration == l_duration).only_enforce_if(l_presence)
+ model.add(end == l_end).only_enforce_if(l_presence)
+
+ # Add the local interval to the right machine.
+ intervals_per_resources[task[alt_id][1]].append(l_interval)
+
+ # Store the presences for the solution.
+ presences[(job_id, task_id, alt_id)] = l_presence
+
+ # Select exactly one presence variable.
+ model.add_exactly_one(l_presences)
+ else:
+ intervals_per_resources[task[0][1]].append(interval)
+ presences[(job_id, task_id, 0)] = model.new_constant(1)
+
+ if previous_end is not None:
+ job_ends.append(previous_end)
+
+ # Create machines constraints.
+ for machine_id in all_machines:
+ intervals = intervals_per_resources[machine_id]
+ if len(intervals) > 1:
+ model.add_no_overlap(intervals)
+
+ # Makespan objective
+ makespan = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(makespan, job_ends)
+ model.minimize(makespan)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ solution_printer = SolutionPrinter()
+ status = solver.solve(model, solution_printer)
+
+ # Print final solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ print(f"Optimal objective value: {solver.objective_value}")
for job_id in all_jobs:
- job = jobs[job_id]
- num_tasks = len(job)
- previous_end = None
- for task_id in range(num_tasks):
- task = job[task_id]
-
- min_duration = task[0][0]
- max_duration = task[0][0]
-
- num_alternatives = len(task)
- all_alternatives = range(num_alternatives)
-
- for alt_id in range(1, num_alternatives):
- alt_duration = task[alt_id][0]
- min_duration = min(min_duration, alt_duration)
- max_duration = max(max_duration, alt_duration)
-
- # Create main interval for the task.
- suffix_name = f"_j{job_id}_t{task_id}"
- start = model.new_int_var(0, horizon, "start" + suffix_name)
- duration = model.new_int_var(
- min_duration, max_duration, "duration" + suffix_name
- )
- end = model.new_int_var(0, horizon, "end" + suffix_name)
- interval = model.new_interval_var(
- start, duration, end, "interval" + suffix_name
- )
-
- # Store the start for the solution.
- starts[(job_id, task_id)] = start
-
- # Add precedence with previous task in the same job.
- if previous_end is not None:
- model.add(start >= previous_end)
- previous_end = end
-
- # Create alternative intervals.
- if num_alternatives > 1:
- l_presences = []
- for alt_id in all_alternatives:
- alt_suffix = f"_j{job_id}_t{task_id}_a{alt_id}"
- l_presence = model.new_bool_var("presence" + alt_suffix)
- l_start = model.new_int_var(0, horizon, "start" + alt_suffix)
- l_duration = task[alt_id][0]
- l_end = model.new_int_var(0, horizon, "end" + alt_suffix)
- l_interval = model.new_optional_interval_var(
- l_start, l_duration, l_end, l_presence, "interval" + alt_suffix
- )
- l_presences.append(l_presence)
-
- # Link the primary/global variables with the local ones.
- model.add(start == l_start).only_enforce_if(l_presence)
- model.add(duration == l_duration).only_enforce_if(l_presence)
- model.add(end == l_end).only_enforce_if(l_presence)
-
- # Add the local interval to the right machine.
- intervals_per_resources[task[alt_id][1]].append(l_interval)
-
- # Store the presences for the solution.
- presences[(job_id, task_id, alt_id)] = l_presence
-
- # Select exactly one presence variable.
- model.add_exactly_one(l_presences)
- else:
- intervals_per_resources[task[0][1]].append(interval)
- presences[(job_id, task_id, 0)] = model.new_constant(1)
-
- if previous_end is not None:
- job_ends.append(previous_end)
-
- # Create machines constraints.
- for machine_id in all_machines:
- intervals = intervals_per_resources[machine_id]
- if len(intervals) > 1:
- model.add_no_overlap(intervals)
-
- # Makespan objective
- makespan = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(makespan, job_ends)
- model.minimize(makespan)
-
- # Solve model.
- solver = cp_model.CpSolver()
- solution_printer = SolutionPrinter()
- status = solver.solve(model, solution_printer)
-
- # Print final solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- print(f"Optimal objective value: {solver.objective_value}")
- for job_id in all_jobs:
- print(f"Job {job_id}")
- for task_id, task in enumerate(jobs[job_id]):
- start_value = solver.value(starts[(job_id, task_id)])
- machine: int = -1
- task_duration: int = -1
- selected: int = -1
- for alt_id, alt in enumerate(task):
- if solver.boolean_value(presences[(job_id, task_id, alt_id)]):
- task_duration, machine = alt
- selected = alt_id
- print(
- f" task_{job_id}_{task_id} starts at {start_value} (alt"
- f" {selected}, machine {machine}, duration {task_duration})"
- )
-
- print(solver.response_stats())
+ print(f"Job {job_id}")
+ for task_id, task in enumerate(jobs[job_id]):
+ start_value = solver.value(starts[(job_id, task_id)])
+ machine: int = -1
+ task_duration: int = -1
+ selected: int = -1
+ for alt_id, alt in enumerate(task):
+ if solver.boolean_value(presences[(job_id, task_id, alt_id)]):
+ task_duration, machine = alt
+ selected = alt_id
+ print(
+ f" task_{job_id}_{task_id} starts at {start_value} (alt"
+ f" {selected}, machine {machine}, duration {task_duration})"
+ )
+
+ print(solver.response_stats())
flexible_jobshop()
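The alternative-interval construction above (a master interval plus one optional interval per machine, linked by a presence literal, with exactly one presence true) is easier to see on a single task. A minimal sketch with two assumed alternatives of durations 3 and 5 and an arbitrary horizon:

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    horizon = 10

    # Master interval whose duration depends on the chosen alternative.
    start = model.new_int_var(0, horizon, "start")
    duration = model.new_int_var(3, 5, "duration")
    end = model.new_int_var(0, horizon, "end")
    model.new_interval_var(start, duration, end, "master")

    presences = []
    for alt_id, alt_duration in enumerate((3, 5)):
        presence = model.new_bool_var(f"presence_{alt_id}")
        l_start = model.new_int_var(0, horizon, f"start_{alt_id}")
        l_end = model.new_int_var(0, horizon, f"end_{alt_id}")
        model.new_optional_interval_var(
            l_start, alt_duration, l_end, presence, f"alt_{alt_id}")
        # Tie the master variables to the local copy when it is selected.
        model.add(start == l_start).only_enforce_if(presence)
        model.add(duration == alt_duration).only_enforce_if(presence)
        model.add(end == l_end).only_enforce_if(presence)
        presences.append(presence)
    model.add_exactly_one(presences)

    model.minimize(end)
    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("chosen:", [solver.boolean_value(p) for p in presences],
              "end:", solver.value(end))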
diff --git a/examples/python/gate_scheduling_sat.py b/examples/python/gate_scheduling_sat.py
index 9cea61deb76..fdcad19f19f 100644
--- a/examples/python/gate_scheduling_sat.py
+++ b/examples/python/gate_scheduling_sat.py
@@ -30,135 +30,135 @@
def main(_) -> None:
- """Solves the gate scheduling problem."""
- model = cp_model.CpModel()
-
- jobs = [
- [3, 3], # [duration, width]
- [2, 5],
- [1, 3],
- [3, 7],
- [7, 3],
- [2, 2],
- [2, 2],
- [5, 5],
- [10, 2],
- [4, 3],
- [2, 6],
- [1, 2],
- [6, 8],
- [4, 5],
- [3, 7],
- ]
-
- max_width = 10
-
- horizon = sum(t[0] for t in jobs)
- num_jobs = len(jobs)
- all_jobs = range(num_jobs)
-
- intervals = []
- intervals0 = []
- intervals1 = []
- performed = []
- starts = []
- ends = []
- demands = []
+ """Solves the gate scheduling problem."""
+ model = cp_model.CpModel()
+
+ jobs = [
+ [3, 3], # [duration, width]
+ [2, 5],
+ [1, 3],
+ [3, 7],
+ [7, 3],
+ [2, 2],
+ [2, 2],
+ [5, 5],
+ [10, 2],
+ [4, 3],
+ [2, 6],
+ [1, 2],
+ [6, 8],
+ [4, 5],
+ [3, 7],
+ ]
+
+ max_width = 10
+
+ horizon = sum(t[0] for t in jobs)
+ num_jobs = len(jobs)
+ all_jobs = range(num_jobs)
+
+ intervals = []
+ intervals0 = []
+ intervals1 = []
+ performed = []
+ starts = []
+ ends = []
+ demands = []
+
+ for i in all_jobs:
+ # Create main interval.
+ start = model.new_int_var(0, horizon, f"start_{i}")
+ duration = jobs[i][0]
+ end = model.new_int_var(0, horizon, f"end_{i}")
+ interval = model.new_interval_var(start, duration, end, f"interval_{i}")
+ starts.append(start)
+ intervals.append(interval)
+ ends.append(end)
+ demands.append(jobs[i][1])
+
+ # Create an optional copy of interval to be executed on machine 0.
+ performed_on_m0 = model.new_bool_var(f"perform_{i}_on_m0")
+ performed.append(performed_on_m0)
+ start0 = model.new_int_var(0, horizon, f"start_{i}_on_m0")
+ end0 = model.new_int_var(0, horizon, f"end_{i}_on_m0")
+ interval0 = model.new_optional_interval_var(
+ start0, duration, end0, performed_on_m0, f"interval_{i}_on_m0"
+ )
+ intervals0.append(interval0)
+
+ # Create an optional copy of interval to be executed on machine 1.
+ start1 = model.new_int_var(0, horizon, f"start_{i}_on_m1")
+ end1 = model.new_int_var(0, horizon, f"end_{i}_on_m1")
+ interval1 = model.new_optional_interval_var(
+ start1,
+ duration,
+ end1,
+ ~performed_on_m0,
+ f"interval_{i}_on_m1",
+ )
+ intervals1.append(interval1)
+
+ # We only propagate the constraint if the task is performed on the machine.
+ model.add(start0 == start).only_enforce_if(performed_on_m0)
+ model.add(start1 == start).only_enforce_if(~performed_on_m0)
+
+ # Width constraint (modeled as a cumulative)
+ model.add_cumulative(intervals, demands, max_width)
+
+ # Choose which machine to perform the jobs on.
+ model.add_no_overlap(intervals0)
+ model.add_no_overlap(intervals1)
+
+ # Objective variable.
+ makespan = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(makespan, ends)
+ model.minimize(makespan)
+
+ # Symmetry breaking.
+ model.add(performed[0] == 0)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ solver.solve(model)
+
+ # Output solution.
+ if visualization.RunFromIPython():
+ output = visualization.SvgWrapper(solver.objective_value, max_width, 40.0)
+ output.AddTitle(f"Makespan = {solver.objective_value}")
+ color_manager = visualization.ColorManager()
+ color_manager.SeedRandomColor(0)
for i in all_jobs:
- # Create main interval.
- start = model.new_int_var(0, horizon, f"start_{i}")
- duration = jobs[i][0]
- end = model.new_int_var(0, horizon, f"end_{i}")
- interval = model.new_interval_var(start, duration, end, f"interval_{i}")
- starts.append(start)
- intervals.append(interval)
- ends.append(end)
- demands.append(jobs[i][1])
-
- # Create an optional copy of interval to be executed on machine 0.
- performed_on_m0 = model.new_bool_var(f"perform_{i}_on_m0")
- performed.append(performed_on_m0)
- start0 = model.new_int_var(0, horizon, f"start_{i}_on_m0")
- end0 = model.new_int_var(0, horizon, f"end_{i}_on_m0")
- interval0 = model.new_optional_interval_var(
- start0, duration, end0, performed_on_m0, f"interval_{i}_on_m0"
- )
- intervals0.append(interval0)
-
- # Create an optional copy of interval to be executed on machine 1.
- start1 = model.new_int_var(0, horizon, f"start_{i}_on_m1")
- end1 = model.new_int_var(0, horizon, f"end_{i}_on_m1")
- interval1 = model.new_optional_interval_var(
- start1,
- duration,
- end1,
- ~performed_on_m0,
- f"interval_{i}_on_m1",
- )
- intervals1.append(interval1)
-
- # We only propagate the constraint if the tasks is performed on the machine.
- model.add(start0 == start).only_enforce_if(performed_on_m0)
- model.add(start1 == start).only_enforce_if(~performed_on_m0)
-
- # Width constraint (modeled as a cumulative)
- model.add_cumulative(intervals, demands, max_width)
-
- # Choose which machine to perform the jobs on.
- model.add_no_overlap(intervals0)
- model.add_no_overlap(intervals1)
-
- # Objective variable.
- makespan = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(makespan, ends)
- model.minimize(makespan)
-
- # Symmetry breaking.
- model.add(performed[0] == 0)
-
- # Solve model.
- solver = cp_model.CpSolver()
- solver.solve(model)
-
- # Output solution.
- if visualization.RunFromIPython():
- output = visualization.SvgWrapper(solver.objective_value, max_width, 40.0)
- output.AddTitle(f"Makespan = {solver.objective_value}")
- color_manager = visualization.ColorManager()
- color_manager.SeedRandomColor(0)
-
- for i in all_jobs:
- performed_machine = 1 - solver.value(performed[i])
- start_of_task = solver.value(starts[i])
- d_x = jobs[i][0]
- d_y = jobs[i][1]
- s_y = performed_machine * (max_width - d_y)
- output.AddRectangle(
- start_of_task,
- s_y,
- d_x,
- d_y,
- color_manager.RandomColor(),
- "black",
- f"j{i}",
- )
-
- output.AddXScale()
- output.AddYScale()
- output.Display()
- else:
- print("Solution")
- print(f" - makespan = {solver.objective_value}")
- for i in all_jobs:
- performed_machine = 1 - solver.value(performed[i])
- start_of_task = solver.value(starts[i])
- print(
- f" - Job {i} starts at {start_of_task} on machine"
- f" {performed_machine}"
- )
- print(solver.response_stats())
+ performed_machine = 1 - solver.value(performed[i])
+ start_of_task = solver.value(starts[i])
+ d_x = jobs[i][0]
+ d_y = jobs[i][1]
+ s_y = performed_machine * (max_width - d_y)
+ output.AddRectangle(
+ start_of_task,
+ s_y,
+ d_x,
+ d_y,
+ color_manager.RandomColor(),
+ "black",
+ f"j{i}",
+ )
+
+ output.AddXScale()
+ output.AddYScale()
+ output.Display()
+ else:
+ print("Solution")
+ print(f" - makespan = {solver.objective_value}")
+ for i in all_jobs:
+ performed_machine = 1 - solver.value(performed[i])
+ start_of_task = solver.value(starts[i])
+ print(
+ f" - Job {i} starts at {start_of_task} on machine"
+ f" {performed_machine}"
+ )
+ print(solver.response_stats())
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
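The width constraint above is a plain cumulative. A tiny self-contained sketch of add_cumulative, with invented durations, widths and gate width and without the machine-choice part:

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    horizon = 20
    durations = [4, 3, 5]  # invented
    widths = [2, 3, 2]     # invented
    max_width = 4          # invented gate width

    intervals, ends = [], []
    for i, d in enumerate(durations):
        start = model.new_int_var(0, horizon, f"s{i}")
        end = model.new_int_var(0, horizon, f"e{i}")
        intervals.append(model.new_interval_var(start, d, end, f"i{i}"))
        ends.append(end)

    # Jobs may overlap in time only while their total width fits in the gate.
    model.add_cumulative(intervals, widths, max_width)

    makespan = model.new_int_var(0, horizon, "makespan")
    model.add_max_equality(makespan, ends)
    model.minimize(makespan)

    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("makespan =", solver.value(makespan))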
diff --git a/examples/python/golomb8.py b/examples/python/golomb8.py
index cb2a2423ca0..4254aa34ee1 100755
--- a/examples/python/golomb8.py
+++ b/examples/python/golomb8.py
@@ -31,57 +31,55 @@
def main(_) -> None:
- # Create the solver.
- solver = pywrapcp.Solver("golomb ruler")
-
- size = 8
- var_max = size * size
- all_vars = list(range(0, size))
-
- marks = [solver.IntVar(0, var_max, "marks_%d" % i) for i in all_vars]
-
- objective = solver.Minimize(marks[size - 1], 1)
-
- solver.Add(marks[0] == 0)
-
- # We expand the creation of the diff array to avoid a pylint warning.
- diffs = []
- for i in range(size - 1):
- for j in range(i + 1, size):
- diffs.append(marks[j] - marks[i])
- solver.Add(solver.AllDifferent(diffs))
-
- solver.Add(marks[size - 1] - marks[size - 2] > marks[1] - marks[0])
- for i in range(size - 2):
- solver.Add(marks[i + 1] > marks[i])
-
- solution = solver.Assignment()
- solution.Add(marks[size - 1])
- collector = solver.AllSolutionCollector(solution)
-
- solver.Solve(
- solver.Phase(marks, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
- [objective, collector],
- )
- for i in range(0, collector.SolutionCount()):
- obj_value = collector.Value(i, marks[size - 1])
- time = collector.WallTime(i)
- branches = collector.Branches(i)
- failures = collector.Failures(i)
- print(
- ("Solution #%i: value = %i, failures = %i, branches = %i," "time = %i ms")
- % (i, obj_value, failures, branches, time)
- )
- time = solver.WallTime()
- branches = solver.Branches()
- failures = solver.Failures()
+ # Create the solver.
+ solver = pywrapcp.Solver("golomb ruler")
+
+ size = 8
+ var_max = size * size
+ all_vars = list(range(0, size))
+
+ marks = [solver.IntVar(0, var_max, "marks_%d" % i) for i in all_vars]
+
+ objective = solver.Minimize(marks[size - 1], 1)
+
+ solver.Add(marks[0] == 0)
+
+ # We expand the creation of the diff array to avoid a pylint warning.
+ diffs = []
+ for i in range(size - 1):
+ for j in range(i + 1, size):
+ diffs.append(marks[j] - marks[i])
+ solver.Add(solver.AllDifferent(diffs))
+
+ solver.Add(marks[size - 1] - marks[size - 2] > marks[1] - marks[0])
+ for i in range(size - 2):
+ solver.Add(marks[i + 1] > marks[i])
+
+ solution = solver.Assignment()
+ solution.Add(marks[size - 1])
+ collector = solver.AllSolutionCollector(solution)
+
+ solver.Solve(
+ solver.Phase(marks, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
+ [objective, collector],
+ )
+ for i in range(0, collector.SolutionCount()):
+ obj_value = collector.Value(i, marks[size - 1])
+ time = collector.WallTime(i)
+ branches = collector.Branches(i)
+ failures = collector.Failures(i)
print(
- (
- "Total run : failures = %i, branches = %i, time = %i ms"
- % (failures, branches, time)
- )
+ "Solution #%i: value = %i, failures = %i, branches = %i,time = %i ms"
+ % (i, obj_value, failures, branches, time)
)
+ time = solver.WallTime()
+ branches = solver.Branches()
+ failures = solver.Failures()
+ print(
+ "Total run : failures = %i, branches = %i, time = %i ms"
+ % (failures, branches, time)
+ )
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
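The objective/collector idiom used in this example (an OptimizeVar plus a solution collector passed to Solve as search monitors) also works on a toy model; the variables and constraints below are made up:

    from ortools.constraint_solver import pywrapcp

    solver = pywrapcp.Solver("toy")
    x = solver.IntVar(0, 10, "x")
    y = solver.IntVar(0, 10, "y")
    solver.Add(x + y == 7)
    solver.Add(x > y)

    # Minimize x, keeping only the best solution found.
    objective = solver.Minimize(x, 1)
    solution = solver.Assignment()
    solution.Add(x)
    solution.Add(y)
    collector = solver.LastSolutionCollector(solution)

    solver.Solve(
        solver.Phase([x, y], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE),
        [objective, collector],
    )
    if collector.SolutionCount() > 0:
        print("x =", collector.Value(0, x), "y =", collector.Value(0, y))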
diff --git a/examples/python/golomb_sat.py b/examples/python/golomb_sat.py
index 6b4e19cc06c..28b2b81769f 100644
--- a/examples/python/golomb_sat.py
+++ b/examples/python/golomb_sat.py
@@ -39,57 +39,57 @@
def solve_golomb_ruler(order: int, params: str) -> None:
- """Solve the Golomb ruler problem."""
- # Create the model.
- model = cp_model.CpModel()
-
- var_max = order * order
- all_vars = list(range(0, order))
-
- marks = [model.new_int_var(0, var_max, f"marks_{i}") for i in all_vars]
-
- model.add(marks[0] == 0)
- for i in range(order - 2):
- model.add(marks[i + 1] > marks[i])
-
- diffs = []
- for i in range(order - 1):
- for j in range(i + 1, order):
- diff = model.new_int_var(0, var_max, f"diff [{j},{i}]")
- model.add(diff == marks[j] - marks[i])
- diffs.append(diff)
- model.add_all_different(diffs)
-
- # symmetry breaking
- if order > 2:
- model.add(marks[order - 1] - marks[order - 2] > marks[1] - marks[0])
-
- # Objective
- model.minimize(marks[order - 1])
-
- # Solve the model.
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- solution_printer = cp_model.ObjectiveSolutionPrinter()
- print(f"Golomb ruler(order={order})")
- status = solver.solve(model, solution_printer)
-
- # Print solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- for idx, var in enumerate(marks):
- print(f"mark[{idx}]: {solver.value(var)}")
- intervals = [solver.value(diff) for diff in diffs]
- intervals.sort()
- print(f"intervals: {intervals}")
- print(solver.response_stats())
+ """Solve the Golomb ruler problem."""
+ # Create the model.
+ model = cp_model.CpModel()
+
+ var_max = order * order
+ all_vars = list(range(0, order))
+
+ marks = [model.new_int_var(0, var_max, f"marks_{i}") for i in all_vars]
+
+ model.add(marks[0] == 0)
+ for i in range(order - 2):
+ model.add(marks[i + 1] > marks[i])
+
+ diffs = []
+ for i in range(order - 1):
+ for j in range(i + 1, order):
+ diff = model.new_int_var(0, var_max, f"diff [{j},{i}]")
+ model.add(diff == marks[j] - marks[i])
+ diffs.append(diff)
+ model.add_all_different(diffs)
+
+ # symmetry breaking
+ if order > 2:
+ model.add(marks[order - 1] - marks[order - 2] > marks[1] - marks[0])
+
+ # Objective
+ model.minimize(marks[order - 1])
+
+ # Solve the model.
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ solution_printer = cp_model.ObjectiveSolutionPrinter()
+ print(f"Golomb ruler(order={order})")
+ status = solver.solve(model, solution_printer)
+
+ # Print solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ for idx, var in enumerate(marks):
+ print(f"mark[{idx}]: {solver.value(var)}")
+ intervals = [solver.value(diff) for diff in diffs]
+ intervals.sort()
+ print(f"intervals: {intervals}")
+ print(solver.response_stats())
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- solve_golomb_ruler(_ORDER.value, _PARAMS.value)
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ solve_golomb_ruler(_ORDER.value, _PARAMS.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
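The params string above is parsed into the solver's SatParameters proto with text_format.Parse. A minimal sketch of the same mechanism on a throwaway model (the parameter values are just an example):

    from google.protobuf import text_format
    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    x = model.new_int_var(0, 10, "x")
    y = model.new_int_var(0, 10, "y")
    model.add(x + y == 7)
    model.maximize(x)

    solver = cp_model.CpSolver()
    # Any SatParameters field can be set from a text-format string.
    text_format.Parse("max_time_in_seconds:5.0 log_search_progress:false",
                      solver.parameters)
    status = solver.solve(model)
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        print("x =", solver.value(x), "y =", solver.value(y))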
diff --git a/examples/python/hidato_sat.py b/examples/python/hidato_sat.py
index 879ab217452..99950674c72 100755
--- a/examples/python/hidato_sat.py
+++ b/examples/python/hidato_sat.py
@@ -21,202 +21,204 @@
def build_pairs(rows: int, cols: int) -> list[tuple[int, int]]:
- """Build closeness pairs for consecutive numbers.
-
- Build set of allowed pairs such that two consecutive numbers touch
- each other in the grid.
-
- Returns:
- A list of pairs for allowed consecutive position of numbers.
-
- Args:
- rows: the number of rows in the grid
- cols: the number of columns in the grid
- """
- result = []
- for x in range(rows):
- for y in range(cols):
- for dx in (-1, 0, 1):
- for dy in (-1, 0, 1):
- if (
- x + dx >= 0
- and x + dx < rows
- and y + dy >= 0
- and y + dy < cols
- and (dx != 0 or dy != 0)
- ):
- result.append((x * cols + y, (x + dx) * cols + (y + dy)))
- return result
+ """Build closeness pairs for consecutive numbers.
+
+ Build set of allowed pairs such that two consecutive numbers touch
+ each other in the grid.
+
+ Returns:
+ A list of pairs for allowed consecutive position of numbers.
+
+ Args:
+ rows: the number of rows in the grid
+ cols: the number of columns in the grid
+ """
+ result = []
+ for x in range(rows):
+ for y in range(cols):
+ for dx in (-1, 0, 1):
+ for dy in (-1, 0, 1):
+ if (
+ x + dx >= 0
+ and x + dx < rows
+ and y + dy >= 0
+ and y + dy < cols
+ and (dx != 0 or dy != 0)
+ ):
+ result.append((x * cols + y, (x + dx) * cols + (y + dy)))
+ return result
def print_solution(positions: list[int], rows: int, cols: int):
- """Print a current solution."""
- # Create empty board.
- board = []
- for _ in range(rows):
- board.append([0] * cols)
- # Fill board with solution value.
- for k in range(rows * cols):
- position = positions[k]
- board[position // cols][position % cols] = k + 1
- # Print the board.
- print("Solution")
- print_matrix(board)
+ """Print a current solution."""
+ # Create empty board.
+ board = []
+ for _ in range(rows):
+ board.append([0] * cols)
+ # Fill board with solution value.
+ for k in range(rows * cols):
+ position = positions[k]
+ board[position // cols][position % cols] = k + 1
+ # Print the board.
+ print("Solution")
+ print_matrix(board)
def print_matrix(game: list[list[int]]) -> None:
- """Pretty print of a matrix."""
- rows = len(game)
- cols = len(game[0])
- for i in range(rows):
- line = ""
- for j in range(cols):
- if game[i][j] == 0:
- line += " ."
- else:
- line += f"{game[i][j]:3}"
- print(line)
+ """Pretty print of a matrix."""
+ rows = len(game)
+ cols = len(game[0])
+ for i in range(rows):
+ line = ""
+ for j in range(cols):
+ if game[i][j] == 0:
+ line += " ."
+ else:
+ line += f"{game[i][j]:3}"
+ print(line)
def build_puzzle(problem: int) -> Union[None, list[list[int]]]:
- """Build the problem from its index."""
- #
- # models, a 0 indicates an open cell which number is not yet known.
- #
- #
- puzzle = None
- if problem == 1:
- # Simple problem
- puzzle = [[6, 0, 9], [0, 2, 8], [1, 0, 0]]
-
- elif problem == 2:
- puzzle = [
- [0, 44, 41, 0, 0, 0, 0],
- [0, 43, 0, 28, 29, 0, 0],
- [0, 1, 0, 0, 0, 33, 0],
- [0, 2, 25, 4, 34, 0, 36],
- [49, 16, 0, 23, 0, 0, 0],
- [0, 19, 0, 0, 12, 7, 0],
- [0, 0, 0, 14, 0, 0, 0],
- ]
-
- elif problem == 3:
- # Problems from the book:
- # Gyora Bededek: 'Hidato: 2000 Pure Logic Puzzles'
- # Problem 1 (Practice)
- puzzle = [
- [0, 0, 20, 0, 0],
- [0, 0, 0, 16, 18],
- [22, 0, 15, 0, 0],
- [23, 0, 1, 14, 11],
- [0, 25, 0, 0, 12],
- ]
-
- elif problem == 4:
- # problem 2 (Practice)
- puzzle = [
- [0, 0, 0, 0, 14],
- [0, 18, 12, 0, 0],
- [0, 0, 17, 4, 5],
- [0, 0, 7, 0, 0],
- [9, 8, 25, 1, 0],
- ]
-
- elif problem == 5:
- # problem 3 (Beginner)
- puzzle = [
- [0, 26, 0, 0, 0, 18],
- [0, 0, 27, 0, 0, 19],
- [31, 23, 0, 0, 14, 0],
- [0, 33, 8, 0, 15, 1],
- [0, 0, 0, 5, 0, 0],
- [35, 36, 0, 10, 0, 0],
- ]
- elif problem == 6:
- # Problem 15 (Intermediate)
- puzzle = [
- [64, 0, 0, 0, 0, 0, 0, 0],
- [1, 63, 0, 59, 15, 57, 53, 0],
- [0, 4, 0, 14, 0, 0, 0, 0],
- [3, 0, 11, 0, 20, 19, 0, 50],
- [0, 0, 0, 0, 22, 0, 48, 40],
- [9, 0, 0, 32, 23, 0, 0, 41],
- [27, 0, 0, 0, 36, 0, 46, 0],
- [28, 30, 0, 35, 0, 0, 0, 0],
- ]
- return puzzle
+ """Build the problem from its index."""
+ #
+ # models, a 0 indicates an open cell whose number is not yet known.
+ #
+ #
+ puzzle = None
+ if problem == 1:
+ # Simple problem
+ puzzle = [[6, 0, 9], [0, 2, 8], [1, 0, 0]]
+
+ elif problem == 2:
+ puzzle = [
+ [0, 44, 41, 0, 0, 0, 0],
+ [0, 43, 0, 28, 29, 0, 0],
+ [0, 1, 0, 0, 0, 33, 0],
+ [0, 2, 25, 4, 34, 0, 36],
+ [49, 16, 0, 23, 0, 0, 0],
+ [0, 19, 0, 0, 12, 7, 0],
+ [0, 0, 0, 14, 0, 0, 0],
+ ]
+
+ elif problem == 3:
+ # Problems from the book:
+ # Gyora Benedek: 'Hidato: 2000 Pure Logic Puzzles'
+ # Problem 1 (Practice)
+ puzzle = [
+ [0, 0, 20, 0, 0],
+ [0, 0, 0, 16, 18],
+ [22, 0, 15, 0, 0],
+ [23, 0, 1, 14, 11],
+ [0, 25, 0, 0, 12],
+ ]
+
+ elif problem == 4:
+ # problem 2 (Practice)
+ puzzle = [
+ [0, 0, 0, 0, 14],
+ [0, 18, 12, 0, 0],
+ [0, 0, 17, 4, 5],
+ [0, 0, 7, 0, 0],
+ [9, 8, 25, 1, 0],
+ ]
+
+ elif problem == 5:
+ # problem 3 (Beginner)
+ puzzle = [
+ [0, 26, 0, 0, 0, 18],
+ [0, 0, 27, 0, 0, 19],
+ [31, 23, 0, 0, 14, 0],
+ [0, 33, 8, 0, 15, 1],
+ [0, 0, 0, 5, 0, 0],
+ [35, 36, 0, 10, 0, 0],
+ ]
+ elif problem == 6:
+ # Problem 15 (Intermediate)
+ puzzle = [
+ [64, 0, 0, 0, 0, 0, 0, 0],
+ [1, 63, 0, 59, 15, 57, 53, 0],
+ [0, 4, 0, 14, 0, 0, 0, 0],
+ [3, 0, 11, 0, 20, 19, 0, 50],
+ [0, 0, 0, 0, 22, 0, 48, 40],
+ [9, 0, 0, 32, 23, 0, 0, 41],
+ [27, 0, 0, 0, 36, 0, 46, 0],
+ [28, 30, 0, 35, 0, 0, 0, 0],
+ ]
+ return puzzle
def solve_hidato(puzzle: list[list[int]], index: int) -> None:
- """solve the given hidato table."""
- # Create the model.
- model = cp_model.CpModel()
-
- r = len(puzzle)
- c = len(puzzle[0])
- if not visualization.RunFromIPython():
- print("")
- print(f"----- Solving problem {index} -----")
- print("")
- print(f"Initial game ({r} x {c})")
- print_matrix(puzzle)
-
- #
- # Declare variables.
- #
- positions = [model.new_int_var(0, r * c - 1, f"p[{i}]") for i in range(r * c)]
-
- #
- # Constraints.
- #
- model.add_all_different(positions)
-
- #
- # Fill in the clues.
- #
- for i in range(r):
- for j in range(c):
- if puzzle[i][j] > 0:
- model.add(positions[puzzle[i][j] - 1] == i * c + j)
-
- # Consecutive numbers much touch each other in the grid.
- # We use an allowed assignment constraint to model it.
- close_tuples = build_pairs(r, c)
- for k in range(0, r * c - 1):
- model.add_allowed_assignments([positions[k], positions[k + 1]], close_tuples)
-
- #
- # Solution and search.
- #
-
- solver = cp_model.CpSolver()
- status = solver.solve(model)
-
- if status == cp_model.OPTIMAL:
- if visualization.RunFromIPython():
- output = visualization.SvgWrapper(10, r, 40.0)
- for i, var in enumerate(positions):
- val = solver.value(var)
- x = val % c
- y = val // c
- color = "white" if puzzle[y][x] == 0 else "lightgreen"
- output.AddRectangle(x, r - y - 1, 1, 1, color, "black", str(i + 1))
-
- output.AddTitle(f"Puzzle {index} solved in {solver.wall_time:.2f} s")
- output.Display()
- else:
- print_solution(
- [solver.value(x) for x in positions],
- r,
- c,
- )
- print(solver.response_stats())
+ """solve the given hidato table."""
+ # Create the model.
+ model = cp_model.CpModel()
+
+ r = len(puzzle)
+ c = len(puzzle[0])
+ if not visualization.RunFromIPython():
+ print("")
+ print(f"----- Solving problem {index} -----")
+ print("")
+ print(f"Initial game ({r} x {c})")
+ print_matrix(puzzle)
+
+ #
+ # Declare variables.
+ #
+ positions = [model.new_int_var(0, r * c - 1, f"p[{i}]") for i in range(r * c)]
+
+ #
+ # Constraints.
+ #
+ model.add_all_different(positions)
+
+ #
+ # Fill in the clues.
+ #
+ for i in range(r):
+ for j in range(c):
+ if puzzle[i][j] > 0:
+ model.add(positions[puzzle[i][j] - 1] == i * c + j)
+
+ # Consecutive numbers must touch each other in the grid.
+ # We use an allowed assignment constraint to model it.
+ close_tuples = build_pairs(r, c)
+ for k in range(0, r * c - 1):
+ model.add_allowed_assignments(
+ [positions[k], positions[k + 1]], close_tuples
+ )
+
+ #
+ # Solution and search.
+ #
+
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+
+ if status == cp_model.OPTIMAL:
+ if visualization.RunFromIPython():
+ output = visualization.SvgWrapper(10, r, 40.0)
+ for i, var in enumerate(positions):
+ val = solver.value(var)
+ x = val % c
+ y = val // c
+ color = "white" if puzzle[y][x] == 0 else "lightgreen"
+ output.AddRectangle(x, r - y - 1, 1, 1, color, "black", str(i + 1))
+
+ output.AddTitle(f"Puzzle {index} solved in {solver.wall_time:.2f} s")
+ output.Display()
+ else:
+ print_solution(
+ [solver.value(x) for x in positions],
+ r,
+ c,
+ )
+ print(solver.response_stats())
def main(_):
- for pb in range(1, 7):
- solve_hidato(build_puzzle(pb), pb)
+ for pb in range(1, 7):
+ solve_hidato(build_puzzle(pb), pb)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
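The adjacency requirement above is expressed as an allowed-assignments (table) constraint over consecutive position variables. Stripped of the grid, the same construct looks like this; the domains and allowed tuples are invented:

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    x = model.new_int_var(0, 3, "x")
    y = model.new_int_var(0, 3, "y")

    # Only these (x, y) pairs are allowed, like the closeness pairs above.
    allowed = [(0, 1), (1, 0), (1, 2), (2, 1), (2, 3), (3, 2)]
    model.add_allowed_assignments([x, y], allowed)
    model.add(x == 2)

    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("x =", solver.value(x), "y =", solver.value(y))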
diff --git a/examples/python/integer_programming.py b/examples/python/integer_programming.py
index 8a040bd4337..e46c83123a0 100755
--- a/examples/python/integer_programming.py
+++ b/examples/python/integer_programming.py
@@ -18,106 +18,110 @@
def Announce(solver, api_type):
- print(
- "---- Integer programming example with " + solver + " (" + api_type + ") -----"
- )
+ print(
+ "---- Integer programming example with "
+ + solver
+ + " ("
+ + api_type
+ + ") -----"
+ )
def RunIntegerExampleNaturalLanguageAPI(optimization_problem_type):
- """Example of simple integer program with natural language API."""
+ """Example of simple integer program with natural language API."""
- solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
- if not solver:
- return
+ solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
+ if not solver:
+ return
- Announce(optimization_problem_type, "natural language API")
+ Announce(optimization_problem_type, "natural language API")
- infinity = solver.infinity()
- # x1 and x2 are integer non-negative variables.
- x1 = solver.IntVar(0.0, infinity, "x1")
- x2 = solver.IntVar(0.0, infinity, "x2")
+ infinity = solver.infinity()
+ # x1 and x2 are integer non-negative variables.
+ x1 = solver.IntVar(0.0, infinity, "x1")
+ x2 = solver.IntVar(0.0, infinity, "x2")
- solver.Minimize(x1 + 2 * x2)
- solver.Add(3 * x1 + 2 * x2 >= 17)
+ solver.Minimize(x1 + 2 * x2)
+ solver.Add(3 * x1 + 2 * x2 >= 17)
- SolveAndPrint(solver, [x1, x2])
+ SolveAndPrint(solver, [x1, x2])
def RunIntegerExampleCppStyleAPI(optimization_problem_type):
- """Example of simple integer program with the C++ style API."""
- solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
- if not solver:
- return
+ """Example of simple integer program with the C++ style API."""
+ solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
+ if not solver:
+ return
- Announce(optimization_problem_type, "C++ style API")
+ Announce(optimization_problem_type, "C++ style API")
- infinity = solver.infinity()
- # x1 and x2 are integer non-negative variables.
- x1 = solver.IntVar(0.0, infinity, "x1")
- x2 = solver.IntVar(0.0, infinity, "x2")
+ infinity = solver.infinity()
+ # x1 and x2 are integer non-negative variables.
+ x1 = solver.IntVar(0.0, infinity, "x1")
+ x2 = solver.IntVar(0.0, infinity, "x2")
- # Minimize x1 + 2 * x2.
- objective = solver.Objective()
- objective.SetCoefficient(x1, 1)
- objective.SetCoefficient(x2, 2)
+ # Minimize x1 + 2 * x2.
+ objective = solver.Objective()
+ objective.SetCoefficient(x1, 1)
+ objective.SetCoefficient(x2, 2)
- # 2 * x2 + 3 * x1 >= 17.
- ct = solver.Constraint(17, infinity)
- ct.SetCoefficient(x1, 3)
- ct.SetCoefficient(x2, 2)
+ # 2 * x2 + 3 * x1 >= 17.
+ ct = solver.Constraint(17, infinity)
+ ct.SetCoefficient(x1, 3)
+ ct.SetCoefficient(x2, 2)
- SolveAndPrint(solver, [x1, x2])
+ SolveAndPrint(solver, [x1, x2])
def SolveAndPrint(solver, variable_list):
- """Solve the problem and print the solution."""
- print("Number of variables = %d" % solver.NumVariables())
- print("Number of constraints = %d" % solver.NumConstraints())
+ """Solve the problem and print the solution."""
+ print("Number of variables = %d" % solver.NumVariables())
+ print("Number of constraints = %d" % solver.NumConstraints())
- result_status = solver.Solve()
+ result_status = solver.Solve()
- # The problem has an optimal solution.
- assert result_status == pywraplp.Solver.OPTIMAL
+ # The problem has an optimal solution.
+ assert result_status == pywraplp.Solver.OPTIMAL
- # The solution looks legit (when using solvers others than
- # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
- assert solver.VerifySolution(1e-7, True)
+ # The solution looks legit (when using solvers other than
+ # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
+ assert solver.VerifySolution(1e-7, True)
- print("Problem solved in %f milliseconds" % solver.wall_time())
+ print("Problem solved in %f milliseconds" % solver.wall_time())
- # The objective value of the solution.
- print("Optimal objective value = %f" % solver.Objective().Value())
+ # The objective value of the solution.
+ print("Optimal objective value = %f" % solver.Objective().Value())
- # The value of each variable in the solution.
- for variable in variable_list:
- print("%s = %f" % (variable.name(), variable.solution_value()))
+ # The value of each variable in the solution.
+ for variable in variable_list:
+ print("%s = %f" % (variable.name(), variable.solution_value()))
- print("Advanced usage:")
- print("Problem solved in %d branch-and-bound nodes" % solver.nodes())
+ print("Advanced usage:")
+ print("Problem solved in %d branch-and-bound nodes" % solver.nodes())
def RunAllIntegerExampleNaturalLanguageAPI():
- RunIntegerExampleNaturalLanguageAPI("GLPK")
- # Disabling due to ASAN errors with CBC.
- # RunIntegerExampleNaturalLanguageAPI('CBC')
- RunIntegerExampleNaturalLanguageAPI("SCIP")
- RunIntegerExampleNaturalLanguageAPI("SAT")
- RunIntegerExampleNaturalLanguageAPI("XPRESS")
+ RunIntegerExampleNaturalLanguageAPI("GLPK")
+ # Disabling due to ASAN errors with CBC.
+ # RunIntegerExampleNaturalLanguageAPI('CBC')
+ RunIntegerExampleNaturalLanguageAPI("SCIP")
+ RunIntegerExampleNaturalLanguageAPI("SAT")
+ RunIntegerExampleNaturalLanguageAPI("XPRESS")
def RunAllIntegerExampleCppStyleAPI():
- RunIntegerExampleCppStyleAPI("GLPK")
- # Disabling due to ASAN errors with CBC.
- # RunIntegerExampleCppStyleAPI('CBC')
- RunIntegerExampleCppStyleAPI("SCIP")
- RunIntegerExampleCppStyleAPI("SAT")
- RunIntegerExampleCppStyleAPI("XPRESS")
+ RunIntegerExampleCppStyleAPI("GLPK")
+ # Disabling due to ASAN errors with CBC.
+ # RunIntegerExampleCppStyleAPI('CBC')
+ RunIntegerExampleCppStyleAPI("SCIP")
+ RunIntegerExampleCppStyleAPI("SAT")
+ RunIntegerExampleCppStyleAPI("XPRESS")
def main():
- RunAllIntegerExampleNaturalLanguageAPI()
- RunAllIntegerExampleCppStyleAPI()
+ RunAllIntegerExampleNaturalLanguageAPI()
+ RunAllIntegerExampleCppStyleAPI()
if __name__ == "__main__":
- main()
+ main()
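As a condensed usage sketch of the natural-language API shown above, on the same toy model; the backend name is just one of the solvers the example tries and may not be available in every build:

    from ortools.linear_solver import pywraplp

    solver = pywraplp.Solver.CreateSolver("SCIP")  # or "SAT", "GLPK", "XPRESS", ...
    if solver:
        infinity = solver.infinity()
        x1 = solver.IntVar(0.0, infinity, "x1")
        x2 = solver.IntVar(0.0, infinity, "x2")
        solver.Minimize(x1 + 2 * x2)
        solver.Add(3 * x1 + 2 * x2 >= 17)
        if solver.Solve() == pywraplp.Solver.OPTIMAL:
            print("objective =", solver.Objective().Value())
            print("x1 =", x1.solution_value(), "x2 =", x2.solution_value())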
diff --git a/examples/python/jobshop_ft06_distance_sat.py b/examples/python/jobshop_ft06_distance_sat.py
index ffe1901f1d7..8c3f5728b40 100755
--- a/examples/python/jobshop_ft06_distance_sat.py
+++ b/examples/python/jobshop_ft06_distance_sat.py
@@ -32,116 +32,116 @@
def distance_between_jobs(x: int, y: int) -> int:
- """Returns the distance between tasks of job x and tasks of job y."""
- return abs(x - y)
+ """Returns the distance between tasks of job x and tasks of job y."""
+ return abs(x - y)
def jobshop_ft06_distance() -> None:
- """Solves the ft06 jobshop with distances between tasks."""
- # Creates the model.
- model = cp_model.CpModel()
-
- machines_count = 6
- jobs_count = 6
- all_machines = range(0, machines_count)
- all_jobs = range(0, jobs_count)
-
- durations = [
- [1, 3, 6, 7, 3, 6],
- [8, 5, 10, 10, 10, 4],
- [5, 4, 8, 9, 1, 7],
- [5, 5, 5, 3, 8, 9],
- [9, 3, 5, 4, 3, 1],
- [3, 3, 9, 10, 4, 1],
- ]
-
- machines = [
- [2, 0, 1, 3, 5, 4],
- [1, 2, 4, 5, 0, 3],
- [2, 3, 5, 0, 1, 4],
- [1, 0, 2, 3, 4, 5],
- [2, 1, 4, 5, 0, 3],
- [1, 3, 5, 0, 4, 2],
- ]
-
- # Computes horizon statically.
- horizon = 150
-
- task_type = collections.namedtuple("task_type", "start end interval")
-
- # Creates jobs.
- all_tasks = {}
- for i in all_jobs:
- for j in all_machines:
- start_var = model.new_int_var(0, horizon, f"start_{i}_{j}")
- duration = durations[i][j]
- end_var = model.new_int_var(0, horizon, f"end_{i}_{j}")
- interval_var = model.new_interval_var(
- start_var, duration, end_var, f"interval_{i}_{j}"
- )
- all_tasks[(i, j)] = task_type(
- start=start_var, end=end_var, interval=interval_var
- )
-
- # Create disjuctive constraints.
- for i in all_machines:
- job_intervals = []
- job_indices = []
- job_starts = []
- job_ends = []
- for j in all_jobs:
- for k in all_machines:
- if machines[j][k] == i:
- job_intervals.append(all_tasks[(j, k)].interval)
- job_indices.append(j)
- job_starts.append(all_tasks[(j, k)].start)
- job_ends.append(all_tasks[(j, k)].end)
- model.add_no_overlap(job_intervals)
-
- arcs = []
- for j1 in range(len(job_intervals)):
- # Initial arc from the dummy node (0) to a task.
- start_lit = model.new_bool_var(f"{j1} is first job")
- arcs.append((0, j1 + 1, start_lit))
- # Final arc from an arc to the dummy node.
- arcs.append((j1 + 1, 0, model.new_bool_var(f"{j1} is last job")))
-
- for j2 in range(len(job_intervals)):
- if j1 == j2:
- continue
-
- lit = model.new_bool_var(f"{j2} follows {j1}")
- arcs.append((j1 + 1, j2 + 1, lit))
-
- # We add the reified precedence to link the literal with the
- # times of the two tasks.
- min_distance = distance_between_jobs(j1, j2)
- model.add(
- job_starts[j2] >= job_ends[j1] + min_distance
- ).only_enforce_if(lit)
-
- model.add_circuit(arcs)
-
- # Precedences inside a job.
- for i in all_jobs:
- for j in range(0, machines_count - 1):
- model.add(all_tasks[(i, j + 1)].start >= all_tasks[(i, j)].end)
-
- # Makespan objective.
- obj_var = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(
- obj_var, [all_tasks[(i, machines_count - 1)].end for i in all_jobs]
- )
- model.minimize(obj_var)
-
- # Solve model.
- solver = cp_model.CpSolver()
- status = solver.solve(model)
-
- # Output solution.
- if status == cp_model.OPTIMAL:
- print(f"Optimal makespan: {solver.objective_value}")
- print(solver.response_stats())
+ """Solves the ft06 jobshop with distances between tasks."""
+ # Creates the model.
+ model = cp_model.CpModel()
+
+ machines_count = 6
+ jobs_count = 6
+ all_machines = range(0, machines_count)
+ all_jobs = range(0, jobs_count)
+
+ durations = [
+ [1, 3, 6, 7, 3, 6],
+ [8, 5, 10, 10, 10, 4],
+ [5, 4, 8, 9, 1, 7],
+ [5, 5, 5, 3, 8, 9],
+ [9, 3, 5, 4, 3, 1],
+ [3, 3, 9, 10, 4, 1],
+ ]
+
+ machines = [
+ [2, 0, 1, 3, 5, 4],
+ [1, 2, 4, 5, 0, 3],
+ [2, 3, 5, 0, 1, 4],
+ [1, 0, 2, 3, 4, 5],
+ [2, 1, 4, 5, 0, 3],
+ [1, 3, 5, 0, 4, 2],
+ ]
+
+ # Computes horizon statically.
+ horizon = 150
+
+ task_type = collections.namedtuple("task_type", "start end interval")
+
+ # Creates jobs.
+ all_tasks = {}
+ for i in all_jobs:
+ for j in all_machines:
+ start_var = model.new_int_var(0, horizon, f"start_{i}_{j}")
+ duration = durations[i][j]
+ end_var = model.new_int_var(0, horizon, f"end_{i}_{j}")
+ interval_var = model.new_interval_var(
+ start_var, duration, end_var, f"interval_{i}_{j}"
+ )
+ all_tasks[(i, j)] = task_type(
+ start=start_var, end=end_var, interval=interval_var
+ )
+
+ # Create disjunctive constraints.
+ for i in all_machines:
+ job_intervals = []
+ job_indices = []
+ job_starts = []
+ job_ends = []
+ for j in all_jobs:
+ for k in all_machines:
+ if machines[j][k] == i:
+ job_intervals.append(all_tasks[(j, k)].interval)
+ job_indices.append(j)
+ job_starts.append(all_tasks[(j, k)].start)
+ job_ends.append(all_tasks[(j, k)].end)
+ model.add_no_overlap(job_intervals)
+
+ arcs = []
+ for j1 in range(len(job_intervals)):
+ # Initial arc from the dummy node (0) to a task.
+ start_lit = model.new_bool_var(f"{j1} is first job")
+ arcs.append((0, j1 + 1, start_lit))
+ # Final arc from a task to the dummy node.
+ arcs.append((j1 + 1, 0, model.new_bool_var(f"{j1} is last job")))
+
+ for j2 in range(len(job_intervals)):
+ if j1 == j2:
+ continue
+
+ lit = model.new_bool_var(f"{j2} follows {j1}")
+ arcs.append((j1 + 1, j2 + 1, lit))
+
+ # We add the reified precedence to link the literal with the
+ # times of the two tasks.
+ min_distance = distance_between_jobs(j1, j2)
+ model.add(
+ job_starts[j2] >= job_ends[j1] + min_distance
+ ).only_enforce_if(lit)
+
+ model.add_circuit(arcs)
+
+ # Precedences inside a job.
+ for i in all_jobs:
+ for j in range(0, machines_count - 1):
+ model.add(all_tasks[(i, j + 1)].start >= all_tasks[(i, j)].end)
+
+ # Makespan objective.
+ obj_var = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(
+ obj_var, [all_tasks[(i, machines_count - 1)].end for i in all_jobs]
+ )
+ model.minimize(obj_var)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+
+ # Output solution.
+ if status == cp_model.OPTIMAL:
+ print(f"Optimal makespan: {solver.objective_value}")
+ print(solver.response_stats())
jobshop_ft06_distance()
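The circuit construction above (dummy node 0, one literal per ordered pair of tasks, reified precedences carrying the distance) can be exercised on its own. This sketch sequences three tasks on one machine with invented durations and a fixed gap of 1 between consecutive tasks:

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    horizon = 20
    durations = [3, 2, 4]  # invented
    starts = [model.new_int_var(0, horizon, f"s{i}") for i in range(3)]
    ends = [model.new_int_var(0, horizon, f"e{i}") for i in range(3)]
    for i in range(3):
        model.add(ends[i] == starts[i] + durations[i])

    # Circuit over nodes 0..3; node 0 is the dummy, arc (i+1, j+1) means j follows i.
    arcs = []
    for i in range(3):
        arcs.append((0, i + 1, model.new_bool_var(f"first_{i}")))
        arcs.append((i + 1, 0, model.new_bool_var(f"last_{i}")))
        for j in range(3):
            if i == j:
                continue
            lit = model.new_bool_var(f"{j}_follows_{i}")
            arcs.append((i + 1, j + 1, lit))
            # Reified precedence with an assumed distance of 1 between tasks.
            model.add(starts[j] >= ends[i] + 1).only_enforce_if(lit)
    model.add_circuit(arcs)

    makespan = model.new_int_var(0, horizon, "makespan")
    model.add_max_equality(makespan, ends)
    model.minimize(makespan)

    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("makespan =", solver.value(makespan))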
diff --git a/examples/python/jobshop_ft06_sat.py b/examples/python/jobshop_ft06_sat.py
index 0c22d03c97f..5fd7a600adc 100755
--- a/examples/python/jobshop_ft06_sat.py
+++ b/examples/python/jobshop_ft06_sat.py
@@ -30,90 +30,90 @@
def jobshop_ft06() -> None:
- """Solves the ft06 jobshop."""
- # Creates the solver.
- model = cp_model.CpModel()
-
- machines_count = 6
- jobs_count = 6
- all_machines = range(0, machines_count)
- all_jobs = range(0, jobs_count)
-
- durations = [
- [1, 3, 6, 7, 3, 6],
- [8, 5, 10, 10, 10, 4],
- [5, 4, 8, 9, 1, 7],
- [5, 5, 5, 3, 8, 9],
- [9, 3, 5, 4, 3, 1],
- [3, 3, 9, 10, 4, 1],
- ]
-
- machines = [
- [2, 0, 1, 3, 5, 4],
- [1, 2, 4, 5, 0, 3],
- [2, 3, 5, 0, 1, 4],
- [1, 0, 2, 3, 4, 5],
- [2, 1, 4, 5, 0, 3],
- [1, 3, 5, 0, 4, 2],
- ]
-
- # Computes horizon dynamically.
- horizon = sum([sum(durations[i]) for i in all_jobs])
-
- task_type = collections.namedtuple("task_type", "start end interval")
-
- # Creates jobs.
- all_tasks = {}
- for i in all_jobs:
- for j in all_machines:
- start_var = model.new_int_var(0, horizon, f"start_{i}_{j}")
- duration = durations[i][j]
- end_var = model.new_int_var(0, horizon, f"end_{i}_{j}")
- interval_var = model.new_interval_var(
- start_var, duration, end_var, f"interval_{i}_{j}"
- )
- all_tasks[(i, j)] = task_type(
- start=start_var, end=end_var, interval=interval_var
- )
-
- # Create disjuctive constraints.
- machine_to_jobs = {}
- for i in all_machines:
- machines_jobs = []
- for j in all_jobs:
- for k in all_machines:
- if machines[j][k] == i:
- machines_jobs.append(all_tasks[(j, k)].interval)
- machine_to_jobs[i] = machines_jobs
- model.add_no_overlap(machines_jobs)
-
- # Precedences inside a job.
- for i in all_jobs:
- for j in range(0, machines_count - 1):
- model.add(all_tasks[(i, j + 1)].start >= all_tasks[(i, j)].end)
-
- # Makespan objective.
- obj_var = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(
- obj_var, [all_tasks[(i, machines_count - 1)].end for i in all_jobs]
- )
- model.minimize(obj_var)
-
- # Solve the model.
- solver = cp_model.CpSolver()
- solver.parameters.log_search_progress = True
- status = solver.solve(model)
-
- # Output the solution.
- if status == cp_model.OPTIMAL:
- if visualization.RunFromIPython():
- starts = [
- [solver.value(all_tasks[(i, j)][0]) for j in all_machines]
- for i in all_jobs
- ]
- visualization.DisplayJobshop(starts, durations, machines, "FT06")
- else:
- print(f"Optimal makespan: {solver.objective_value}")
+ """Solves the ft06 jobshop."""
+ # Creates the solver.
+ model = cp_model.CpModel()
+
+ machines_count = 6
+ jobs_count = 6
+ all_machines = range(0, machines_count)
+ all_jobs = range(0, jobs_count)
+
+ durations = [
+ [1, 3, 6, 7, 3, 6],
+ [8, 5, 10, 10, 10, 4],
+ [5, 4, 8, 9, 1, 7],
+ [5, 5, 5, 3, 8, 9],
+ [9, 3, 5, 4, 3, 1],
+ [3, 3, 9, 10, 4, 1],
+ ]
+
+ machines = [
+ [2, 0, 1, 3, 5, 4],
+ [1, 2, 4, 5, 0, 3],
+ [2, 3, 5, 0, 1, 4],
+ [1, 0, 2, 3, 4, 5],
+ [2, 1, 4, 5, 0, 3],
+ [1, 3, 5, 0, 4, 2],
+ ]
+
+ # Computes horizon dynamically.
+ horizon = sum([sum(durations[i]) for i in all_jobs])
+
+ task_type = collections.namedtuple("task_type", "start end interval")
+
+ # Creates jobs.
+ all_tasks = {}
+ for i in all_jobs:
+ for j in all_machines:
+ start_var = model.new_int_var(0, horizon, f"start_{i}_{j}")
+ duration = durations[i][j]
+ end_var = model.new_int_var(0, horizon, f"end_{i}_{j}")
+ interval_var = model.new_interval_var(
+ start_var, duration, end_var, f"interval_{i}_{j}"
+ )
+ all_tasks[(i, j)] = task_type(
+ start=start_var, end=end_var, interval=interval_var
+ )
+
+    # Create disjunctive constraints.
+ machine_to_jobs = {}
+ for i in all_machines:
+ machines_jobs = []
+ for j in all_jobs:
+ for k in all_machines:
+ if machines[j][k] == i:
+ machines_jobs.append(all_tasks[(j, k)].interval)
+ machine_to_jobs[i] = machines_jobs
+ model.add_no_overlap(machines_jobs)
+
+ # Precedences inside a job.
+ for i in all_jobs:
+ for j in range(0, machines_count - 1):
+ model.add(all_tasks[(i, j + 1)].start >= all_tasks[(i, j)].end)
+
+ # Makespan objective.
+ obj_var = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(
+ obj_var, [all_tasks[(i, machines_count - 1)].end for i in all_jobs]
+ )
+ model.minimize(obj_var)
+
+ # Solve the model.
+ solver = cp_model.CpSolver()
+ solver.parameters.log_search_progress = True
+ status = solver.solve(model)
+
+ # Output the solution.
+ if status == cp_model.OPTIMAL:
+ if visualization.RunFromIPython():
+ starts = [
+ [solver.value(all_tasks[(i, j)][0]) for j in all_machines]
+ for i in all_jobs
+ ]
+ visualization.DisplayJobshop(starts, durations, machines, "FT06")
+ else:
+ print(f"Optimal makespan: {solver.objective_value}")
jobshop_ft06()
diff --git a/examples/python/jobshop_with_maintenance_sat.py b/examples/python/jobshop_with_maintenance_sat.py
index 6c17e4074db..7b51fe8aaef 100644
--- a/examples/python/jobshop_with_maintenance_sat.py
+++ b/examples/python/jobshop_with_maintenance_sat.py
@@ -21,142 +21,145 @@
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
+ """Print intermediate solutions."""
- def __init__(self) -> None:
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
+ def __init__(self) -> None:
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
- def on_solution_callback(self) -> None:
- """Called at each new solution."""
- print(
- f"Solution {self.__solution_count}, time = {self.wall_time} s,"
- f" objective = {self.objective_value}"
- )
- self.__solution_count += 1
-
-
-def jobshop_with_maintenance() -> None:
- """Solves a jobshop with maintenance on one machine."""
- # Create the model.
- model = cp_model.CpModel()
-
- jobs_data = [ # task = (machine_id, processing_time).
- [(0, 3), (1, 2), (2, 2)], # Job0
- [(0, 2), (2, 1), (1, 4)], # Job1
- [(1, 4), (2, 3)], # Job2
- ]
-
- machines_count = 1 + max(task[0] for job in jobs_data for task in job)
- all_machines = range(machines_count)
-
- # Computes horizon dynamically as the sum of all durations.
- horizon = sum(task[1] for job in jobs_data for task in job)
-
- # Named tuple to store information about created variables.
- task_type = collections.namedtuple("task_type", "start end interval")
- # Named tuple to manipulate solution information.
- assigned_task_type = collections.namedtuple(
- "assigned_task_type", "start job index duration"
+ def on_solution_callback(self) -> None:
+ """Called at each new solution."""
+ print(
+ f"Solution {self.__solution_count}, time = {self.wall_time} s,"
+ f" objective = {self.objective_value}"
)
+ self.__solution_count += 1
- # Creates job intervals and add to the corresponding machine lists.
- all_tasks = {}
- machine_to_intervals = collections.defaultdict(list)
+def jobshop_with_maintenance() -> None:
+ """Solves a jobshop with maintenance on one machine."""
+ # Create the model.
+ model = cp_model.CpModel()
+
+ jobs_data = [ # task = (machine_id, processing_time).
+ [(0, 3), (1, 2), (2, 2)], # Job0
+ [(0, 2), (2, 1), (1, 4)], # Job1
+ [(1, 4), (2, 3)], # Job2
+ ]
+
+ machines_count = 1 + max(task[0] for job in jobs_data for task in job)
+ all_machines = range(machines_count)
+
+ # Computes horizon dynamically as the sum of all durations.
+ horizon = sum(task[1] for job in jobs_data for task in job)
+
+ # Named tuple to store information about created variables.
+ task_type = collections.namedtuple("task_type", "start end interval")
+ # Named tuple to manipulate solution information.
+ assigned_task_type = collections.namedtuple(
+ "assigned_task_type", "start job index duration"
+ )
+
+    # Creates job intervals and adds them to the corresponding machine lists.
+ all_tasks = {}
+ machine_to_intervals = collections.defaultdict(list)
+
+ for job_id, job in enumerate(jobs_data):
+ for entry in enumerate(job):
+ task_id, task = entry
+ machine, duration = task
+ suffix = f"_{job_id}_{task_id}"
+ start_var = model.new_int_var(0, horizon, "start" + suffix)
+ end_var = model.new_int_var(0, horizon, "end" + suffix)
+ interval_var = model.new_interval_var(
+ start_var, duration, end_var, "interval" + suffix
+ )
+ all_tasks[job_id, task_id] = task_type(
+ start=start_var, end=end_var, interval=interval_var
+ )
+ machine_to_intervals[machine].append(interval_var)
+
+    # Add a maintenance interval (machine 0 is not available at times {4, 5, 6, 7}).
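+    # This fixed interval [4, 8) joins machine 0's no-overlap constraint below, so
+    # no task can be scheduled during the maintenance window.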
+ machine_to_intervals[0].append(model.new_interval_var(4, 4, 8, "weekend_0"))
+
+ # Create and add disjunctive constraints.
+ for machine in all_machines:
+ model.add_no_overlap(machine_to_intervals[machine])
+
+ # Precedences inside a job.
+ for job_id, job in enumerate(jobs_data):
+ for task_id in range(len(job) - 1):
+ model.add(
+ all_tasks[job_id, task_id + 1].start >= all_tasks[job_id, task_id].end
+ )
+
+ # Makespan objective.
+ obj_var = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(
+ obj_var,
+ [
+ all_tasks[job_id, len(job) - 1].end
+ for job_id, job in enumerate(jobs_data)
+ ],
+ )
+ model.minimize(obj_var)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ solution_printer = SolutionPrinter()
+ status = solver.solve(model, solution_printer)
+
+ # Output solution.
+ if status == cp_model.OPTIMAL:
+ # Create one list of assigned tasks per machine.
+ assigned_jobs = collections.defaultdict(list)
for job_id, job in enumerate(jobs_data):
- for entry in enumerate(job):
- task_id, task = entry
- machine, duration = task
- suffix = f"_{job_id}_{task_id}"
- start_var = model.new_int_var(0, horizon, "start" + suffix)
- end_var = model.new_int_var(0, horizon, "end" + suffix)
- interval_var = model.new_interval_var(
- start_var, duration, end_var, "interval" + suffix
- )
- all_tasks[job_id, task_id] = task_type(
- start=start_var, end=end_var, interval=interval_var
+ for task_id, task in enumerate(job):
+ machine = task[0]
+ assigned_jobs[machine].append(
+ assigned_task_type(
+ start=solver.value(all_tasks[job_id, task_id].start),
+ job=job_id,
+ index=task_id,
+ duration=task[1],
)
- machine_to_intervals[machine].append(interval_var)
-
- # Add maintenance interval (machine 0 is not available on time {4, 5, 6, 7}).
- machine_to_intervals[0].append(model.new_interval_var(4, 4, 8, "weekend_0"))
+ )
- # Create and add disjunctive constraints.
+ # Create per machine output lines.
+ output = ""
for machine in all_machines:
- model.add_no_overlap(machine_to_intervals[machine])
+ # Sort by starting time.
+ assigned_jobs[machine].sort()
+ sol_line_tasks = "Machine " + str(machine) + ": "
+ sol_line = " "
- # Precedences inside a job.
- for job_id, job in enumerate(jobs_data):
- for task_id in range(len(job) - 1):
- model.add(
- all_tasks[job_id, task_id + 1].start >= all_tasks[job_id, task_id].end
- )
+ for assigned_task in assigned_jobs[machine]:
+ name = f"job_{assigned_task.job}_{assigned_task.index}"
+ # add spaces to output to align columns.
+ sol_line_tasks += f"{name:>10}"
+ start = assigned_task.start
+ duration = assigned_task.duration
- # Makespan objective.
- obj_var = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(
- obj_var,
- [all_tasks[job_id, len(job) - 1].end for job_id, job in enumerate(jobs_data)],
- )
- model.minimize(obj_var)
-
- # Solve model.
- solver = cp_model.CpSolver()
- solution_printer = SolutionPrinter()
- status = solver.solve(model, solution_printer)
-
- # Output solution.
- if status == cp_model.OPTIMAL:
- # Create one list of assigned tasks per machine.
- assigned_jobs = collections.defaultdict(list)
- for job_id, job in enumerate(jobs_data):
- for task_id, task in enumerate(job):
- machine = task[0]
- assigned_jobs[machine].append(
- assigned_task_type(
- start=solver.value(all_tasks[job_id, task_id].start),
- job=job_id,
- index=task_id,
- duration=task[1],
- )
- )
-
- # Create per machine output lines.
- output = ""
- for machine in all_machines:
- # Sort by starting time.
- assigned_jobs[machine].sort()
- sol_line_tasks = "Machine " + str(machine) + ": "
- sol_line = " "
-
- for assigned_task in assigned_jobs[machine]:
- name = f"job_{assigned_task.job}_{assigned_task.index}"
- # add spaces to output to align columns.
- sol_line_tasks += f"{name:>10}"
- start = assigned_task.start
- duration = assigned_task.duration
-
- sol_tmp = f"[{start}, {start + duration}]"
- # add spaces to output to align columns.
- sol_line += f"{sol_tmp:>10}"
-
- sol_line += "\n"
- sol_line_tasks += "\n"
- output += sol_line_tasks
- output += sol_line
-
- # Finally print the solution found.
- print(f"Optimal Schedule Length: {solver.objective_value}")
- print(output)
- print(solver.response_stats())
+ sol_tmp = f"[{start}, {start + duration}]"
+ # add spaces to output to align columns.
+ sol_line += f"{sol_tmp:>10}"
+
+ sol_line += "\n"
+ sol_line_tasks += "\n"
+ output += sol_line_tasks
+ output += sol_line
+
+ # Finally print the solution found.
+ print(f"Optimal Schedule Length: {solver.objective_value}")
+ print(output)
+ print(solver.response_stats())
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- jobshop_with_maintenance()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ jobshop_with_maintenance()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/knapsack_2d_sat.py b/examples/python/knapsack_2d_sat.py
index e7718215521..e1b5fd9eab9 100644
--- a/examples/python/knapsack_2d_sat.py
+++ b/examples/python/knapsack_2d_sat.py
@@ -44,8 +44,8 @@
def build_data() -> tuple[pd.Series, int, int]:
- """Build the data frame."""
- data = """
+ """Build the data frame."""
+ data = """
item width height available value color
k1 20 4 2 338.984 blue
k2 12 17 6 849.246 orange
@@ -59,360 +59,358 @@ def build_data() -> tuple[pd.Series, int, int]:
k10 9 11 5 369.560 cyan
"""
- data = pd.read_table(io.StringIO(data), sep=r"\s+")
- print("Input data")
- print(data)
+ data = pd.read_table(io.StringIO(data), sep=r"\s+")
+ print("Input data")
+ print(data)
- max_height = 20
- max_width = 30
+ max_height = 20
+ max_width = 30
- print(f"Container max_width:{max_width} max_height:{max_height}")
- print(f"#Items: {len(data.index)}")
- return (data, max_height, max_width)
+ print(f"Container max_width:{max_width} max_height:{max_height}")
+ print(f"#Items: {len(data.index)}")
+ return (data, max_height, max_width)
def solve_with_duplicate_items(
data: pd.Series, max_height: int, max_width: int
) -> None:
- """solve the problem by building 2 items (rotated or not) for each item."""
- # Derived data (expanded to individual items).
- data_widths = data["width"].to_numpy()
- data_heights = data["height"].to_numpy()
- data_availability = data["available"].to_numpy()
- data_values = data["value"].to_numpy()
-
- # Non duplicated items data.
- base_item_widths = np.repeat(data_widths, data_availability)
- base_item_heights = np.repeat(data_heights, data_availability)
- base_item_values = np.repeat(data_values, data_availability)
- num_data_items = len(base_item_values)
-
- # Create rotated items by duplicating.
- item_widths = np.concatenate((base_item_widths, base_item_heights))
- item_heights = np.concatenate((base_item_heights, base_item_widths))
- item_values = np.concatenate((base_item_values, base_item_values))
-
- num_items = len(item_values)
-
- # OR-Tools model
- model = cp_model.CpModel()
-
- # Variables
- x_starts = []
- x_ends = []
- y_starts = []
- y_ends = []
- is_used = []
- x_intervals = []
- y_intervals = []
-
- for i in range(num_items):
- ## Is the item used?
- is_used.append(model.new_bool_var(f"is_used{i}"))
-
- ## Item coordinates.
- x_starts.append(model.new_int_var(0, max_width, f"x_start{i}"))
- x_ends.append(model.new_int_var(0, max_width, f"x_end{i}"))
- y_starts.append(model.new_int_var(0, max_height, f"y_start{i}"))
- y_ends.append(model.new_int_var(0, max_height, f"y_end{i}"))
-
- ## Interval variables.
- x_intervals.append(
- model.new_interval_var(
- x_starts[i],
- item_widths[i] * is_used[i],
- x_ends[i],
- f"x_interval{i}",
- )
- )
- y_intervals.append(
- model.new_interval_var(
- y_starts[i],
- item_heights[i] * is_used[i],
- y_ends[i],
- f"y_interval{i}",
- )
+    """Solve the problem by building 2 items (rotated or not) for each item."""
+ # Derived data (expanded to individual items).
+ data_widths = data["width"].to_numpy()
+ data_heights = data["height"].to_numpy()
+ data_availability = data["available"].to_numpy()
+ data_values = data["value"].to_numpy()
+
+ # Non duplicated items data.
+ base_item_widths = np.repeat(data_widths, data_availability)
+ base_item_heights = np.repeat(data_heights, data_availability)
+ base_item_values = np.repeat(data_values, data_availability)
+ num_data_items = len(base_item_values)
+
+ # Create rotated items by duplicating.
+ item_widths = np.concatenate((base_item_widths, base_item_heights))
+ item_heights = np.concatenate((base_item_heights, base_item_widths))
+ item_values = np.concatenate((base_item_values, base_item_values))
+
+ num_items = len(item_values)
+
+ # OR-Tools model
+ model = cp_model.CpModel()
+
+ # Variables
+ x_starts = []
+ x_ends = []
+ y_starts = []
+ y_ends = []
+ is_used = []
+ x_intervals = []
+ y_intervals = []
+
+ for i in range(num_items):
+ ## Is the item used?
+ is_used.append(model.new_bool_var(f"is_used{i}"))
+
+ ## Item coordinates.
+ x_starts.append(model.new_int_var(0, max_width, f"x_start{i}"))
+ x_ends.append(model.new_int_var(0, max_width, f"x_end{i}"))
+ y_starts.append(model.new_int_var(0, max_height, f"y_start{i}"))
+ y_ends.append(model.new_int_var(0, max_height, f"y_end{i}"))
+
+ ## Interval variables.
+ x_intervals.append(
+ model.new_interval_var(
+ x_starts[i],
+ item_widths[i] * is_used[i],
+ x_ends[i],
+ f"x_interval{i}",
)
-
- # Unused boxes are fixed at (0.0).
- model.add(x_starts[i] == 0).only_enforce_if(~is_used[i])
- model.add(y_starts[i] == 0).only_enforce_if(~is_used[i])
-
- # Constraints.
-
- ## Only one of non-rotated/rotated pair can be used.
- for i in range(num_data_items):
- model.add(is_used[i] + is_used[i + num_data_items] <= 1)
-
- ## 2D no overlap.
- model.add_no_overlap_2d(x_intervals, y_intervals)
-
- ## Objective.
- model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
-
- # Output proto to file.
- if _OUTPUT_PROTO.value:
- print(f"Writing proto to {_OUTPUT_PROTO.value}")
- with open(_OUTPUT_PROTO.value, "w") as text_file:
- text_file.write(str(model))
-
- # Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
-
- status = solver.solve(model)
-
- # Report solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
- data = pd.DataFrame(
- {
- "x_start": [solver.value(x_starts[i]) for i in used],
- "y_start": [solver.value(y_starts[i]) for i in used],
- "item_width": [item_widths[i] for i in used],
- "item_height": [item_heights[i] for i in used],
- "x_end": [solver.value(x_ends[i]) for i in used],
- "y_end": [solver.value(y_ends[i]) for i in used],
- "item_value": [item_values[i] for i in used],
- }
+ )
+ y_intervals.append(
+ model.new_interval_var(
+ y_starts[i],
+ item_heights[i] * is_used[i],
+ y_ends[i],
+ f"y_interval{i}",
)
- print(data)
+ )
+
+    # Unused boxes are fixed at (0, 0).
+ model.add(x_starts[i] == 0).only_enforce_if(~is_used[i])
+ model.add(y_starts[i] == 0).only_enforce_if(~is_used[i])
+
+ # Constraints.
+
+ ## Only one of non-rotated/rotated pair can be used.
+ for i in range(num_data_items):
+ model.add(is_used[i] + is_used[i + num_data_items] <= 1)
+
+ ## 2D no overlap.
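+    # Selected items are boxes that must not overlap; an unused item has zero-size
+    # intervals (size = dimension * is_used), so it takes no space in the container.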
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+
+ ## Objective.
+ model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
+
+ # Output proto to file.
+ if _OUTPUT_PROTO.value:
+ print(f"Writing proto to {_OUTPUT_PROTO.value}")
+ with open(_OUTPUT_PROTO.value, "w") as text_file:
+ text_file.write(str(model))
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+
+ status = solver.solve(model)
+
+ # Report solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
+ data = pd.DataFrame({
+ "x_start": [solver.value(x_starts[i]) for i in used],
+ "y_start": [solver.value(y_starts[i]) for i in used],
+ "item_width": [item_widths[i] for i in used],
+ "item_height": [item_heights[i] for i in used],
+ "x_end": [solver.value(x_ends[i]) for i in used],
+ "y_end": [solver.value(y_ends[i]) for i in used],
+ "item_value": [item_values[i] for i in used],
+ })
+ print(data)
def solve_with_duplicate_optional_items(
data: pd.Series, max_height: int, max_width: int
):
- """solve the problem by building 2 optional items (rotated or not) for each item."""
- # Derived data (expanded to individual items).
- data_widths = data["width"].to_numpy()
- data_heights = data["height"].to_numpy()
- data_availability = data["available"].to_numpy()
- data_values = data["value"].to_numpy()
-
- # Non duplicated items data.
- base_item_widths = np.repeat(data_widths, data_availability)
- base_item_heights = np.repeat(data_heights, data_availability)
- base_item_values = np.repeat(data_values, data_availability)
- num_data_items = len(base_item_values)
-
- # Create rotated items by duplicating.
- item_widths = np.concatenate((base_item_widths, base_item_heights))
- item_heights = np.concatenate((base_item_heights, base_item_widths))
- item_values = np.concatenate((base_item_values, base_item_values))
-
- num_items = len(item_values)
-
- # OR-Tools model
- model = cp_model.CpModel()
-
- # Variables
- x_starts = []
- y_starts = []
- is_used = []
- x_intervals = []
- y_intervals = []
-
- for i in range(num_items):
- ## Is the item used?
- is_used.append(model.new_bool_var(f"is_used{i}"))
-
- ## Item coordinates.
- x_starts.append(
- model.new_int_var(0, max_width - int(item_widths[i]), f"x_start{i}")
- )
- y_starts.append(
- model.new_int_var(0, max_height - int(item_heights[i]), f"y_start{i}")
- )
-
- ## Interval variables.
- x_intervals.append(
- model.new_optional_fixed_size_interval_var(
- x_starts[i], item_widths[i], is_used[i], f"x_interval{i}"
- )
- )
- y_intervals.append(
- model.new_optional_fixed_size_interval_var(
- y_starts[i], item_heights[i], is_used[i], f"y_interval{i}"
- )
+    """Solve the problem by building 2 optional items (rotated or not) for each item."""
+ # Derived data (expanded to individual items).
+ data_widths = data["width"].to_numpy()
+ data_heights = data["height"].to_numpy()
+ data_availability = data["available"].to_numpy()
+ data_values = data["value"].to_numpy()
+
+ # Non duplicated items data.
+ base_item_widths = np.repeat(data_widths, data_availability)
+ base_item_heights = np.repeat(data_heights, data_availability)
+ base_item_values = np.repeat(data_values, data_availability)
+ num_data_items = len(base_item_values)
+
+ # Create rotated items by duplicating.
+ item_widths = np.concatenate((base_item_widths, base_item_heights))
+ item_heights = np.concatenate((base_item_heights, base_item_widths))
+ item_values = np.concatenate((base_item_values, base_item_values))
+
+ num_items = len(item_values)
+
+ # OR-Tools model
+ model = cp_model.CpModel()
+
+ # Variables
+ x_starts = []
+ y_starts = []
+ is_used = []
+ x_intervals = []
+ y_intervals = []
+
+ for i in range(num_items):
+ ## Is the item used?
+ is_used.append(model.new_bool_var(f"is_used{i}"))
+
+ ## Item coordinates.
+ x_starts.append(
+ model.new_int_var(0, max_width - int(item_widths[i]), f"x_start{i}")
+ )
+ y_starts.append(
+ model.new_int_var(0, max_height - int(item_heights[i]), f"y_start{i}")
+ )
+
+ ## Interval variables.
+ x_intervals.append(
+ model.new_optional_fixed_size_interval_var(
+ x_starts[i], item_widths[i], is_used[i], f"x_interval{i}"
)
- # Unused boxes are fixed at (0.0).
- model.add(x_starts[i] == 0).only_enforce_if(~is_used[i])
- model.add(y_starts[i] == 0).only_enforce_if(~is_used[i])
-
- # Constraints.
-
- ## Only one of non-rotated/rotated pair can be used.
- for i in range(num_data_items):
- model.add(is_used[i] + is_used[i + num_data_items] <= 1)
-
- ## 2D no overlap.
- model.add_no_overlap_2d(x_intervals, y_intervals)
-
- ## Objective.
- model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
-
- # Output proto to file.
- if _OUTPUT_PROTO.value:
- print(f"Writing proto to {_OUTPUT_PROTO.value}")
- with open(_OUTPUT_PROTO.value, "w") as text_file:
- text_file.write(str(model))
-
- # solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
-
- status = solver.solve(model)
-
- # Report solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
- data = pd.DataFrame(
- {
- "x_start": [solver.value(x_starts[i]) for i in used],
- "y_start": [solver.value(y_starts[i]) for i in used],
- "item_width": [item_widths[i] for i in used],
- "item_height": [item_heights[i] for i in used],
- "x_end": [solver.value(x_starts[i]) + item_widths[i] for i in used],
- "y_end": [solver.value(y_starts[i]) + item_heights[i] for i in used],
- "item_value": [item_values[i] for i in used],
- }
+ )
+ y_intervals.append(
+ model.new_optional_fixed_size_interval_var(
+ y_starts[i], item_heights[i], is_used[i], f"y_interval{i}"
)
- print(data)
+ )
+    # Unused boxes are fixed at (0, 0).
+ model.add(x_starts[i] == 0).only_enforce_if(~is_used[i])
+ model.add(y_starts[i] == 0).only_enforce_if(~is_used[i])
+
+ # Constraints.
+
+ ## Only one of non-rotated/rotated pair can be used.
+ for i in range(num_data_items):
+ model.add(is_used[i] + is_used[i + num_data_items] <= 1)
+
+ ## 2D no overlap.
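+    # The x/y intervals are optional with presence literal is_used[i]; absent
+    # intervals are ignored by the no-overlap constraint, so unselected items do
+    # not take space in the container.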
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+
+ ## Objective.
+ model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
+
+ # Output proto to file.
+ if _OUTPUT_PROTO.value:
+ print(f"Writing proto to {_OUTPUT_PROTO.value}")
+ with open(_OUTPUT_PROTO.value, "w") as text_file:
+ text_file.write(str(model))
+
+ # solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+
+ status = solver.solve(model)
+
+ # Report solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
+ data = pd.DataFrame({
+ "x_start": [solver.value(x_starts[i]) for i in used],
+ "y_start": [solver.value(y_starts[i]) for i in used],
+ "item_width": [item_widths[i] for i in used],
+ "item_height": [item_heights[i] for i in used],
+ "x_end": [solver.value(x_starts[i]) + item_widths[i] for i in used],
+ "y_end": [solver.value(y_starts[i]) + item_heights[i] for i in used],
+ "item_value": [item_values[i] for i in used],
+ })
+ print(data)
def solve_with_rotations(data: pd.Series, max_height: int, max_width: int):
- """solve the problem by rotating items."""
- # Derived data (expanded to individual items).
- data_widths = data["width"].to_numpy()
- data_heights = data["height"].to_numpy()
- data_availability = data["available"].to_numpy()
- data_values = data["value"].to_numpy()
-
- item_widths = np.repeat(data_widths, data_availability)
- item_heights = np.repeat(data_heights, data_availability)
- item_values = np.repeat(data_values, data_availability)
-
- num_items = len(item_widths)
-
- # OR-Tools model.
- model = cp_model.CpModel()
-
- # Coordinate variables for each rectangle.
- x_starts = []
- x_sizes = []
- x_ends = []
- y_starts = []
- y_sizes = []
- y_ends = []
- x_intervals = []
- y_intervals = []
-
- for i in range(num_items):
- sizes = [0, int(item_widths[i]), int(item_heights[i])]
- # X coordinates.
- x_starts.append(model.new_int_var(0, max_width, f"x_start{i}"))
- x_sizes.append(
- model.new_int_var_from_domain(
- cp_model.Domain.FromValues(sizes), f"x_size{i}"
- )
+    """Solve the problem by rotating items."""
+ # Derived data (expanded to individual items).
+ data_widths = data["width"].to_numpy()
+ data_heights = data["height"].to_numpy()
+ data_availability = data["available"].to_numpy()
+ data_values = data["value"].to_numpy()
+
+ item_widths = np.repeat(data_widths, data_availability)
+ item_heights = np.repeat(data_heights, data_availability)
+ item_values = np.repeat(data_values, data_availability)
+
+ num_items = len(item_widths)
+
+ # OR-Tools model.
+ model = cp_model.CpModel()
+
+ # Coordinate variables for each rectangle.
+ x_starts = []
+ x_sizes = []
+ x_ends = []
+ y_starts = []
+ y_sizes = []
+ y_ends = []
+ x_intervals = []
+ y_intervals = []
+
+ for i in range(num_items):
+ sizes = [0, int(item_widths[i]), int(item_heights[i])]
+ # X coordinates.
+ x_starts.append(model.new_int_var(0, max_width, f"x_start{i}"))
+ x_sizes.append(
+ model.new_int_var_from_domain(
+ cp_model.Domain.FromValues(sizes), f"x_size{i}"
)
- x_ends.append(model.new_int_var(0, max_width, f"x_end{i}"))
-
- # Y coordinates.
- y_starts.append(model.new_int_var(0, max_height, f"y_start{i}"))
- y_sizes.append(
- model.new_int_var_from_domain(
- cp_model.Domain.FromValues(sizes), f"y_size{i}"
- )
+ )
+ x_ends.append(model.new_int_var(0, max_width, f"x_end{i}"))
+
+ # Y coordinates.
+ y_starts.append(model.new_int_var(0, max_height, f"y_start{i}"))
+ y_sizes.append(
+ model.new_int_var_from_domain(
+ cp_model.Domain.FromValues(sizes), f"y_size{i}"
)
- y_ends.append(model.new_int_var(0, max_height, f"y_end{i}"))
+ )
+ y_ends.append(model.new_int_var(0, max_height, f"y_end{i}"))
- ## Interval variables
- x_intervals.append(
- model.new_interval_var(x_starts[i], x_sizes[i], x_ends[i], f"x_interval{i}")
+ ## Interval variables
+ x_intervals.append(
+ model.new_interval_var(
+ x_starts[i], x_sizes[i], x_ends[i], f"x_interval{i}"
)
- y_intervals.append(
- model.new_interval_var(y_starts[i], y_sizes[i], y_ends[i], f"y_interval{i}")
+ )
+ y_intervals.append(
+ model.new_interval_var(
+ y_starts[i], y_sizes[i], y_ends[i], f"y_interval{i}"
)
-
- # is_used[i] == True if and only if item i is selected.
- is_used = []
-
- # Constraints.
-
- ## for each item, decide is unselected, no_rotation, rotated.
- for i in range(num_items):
- not_selected = model.new_bool_var(f"not_selected_{i}")
- no_rotation = model.new_bool_var(f"no_rotation_{i}")
- rotated = model.new_bool_var(f"rotated_{i}")
-
- ### Exactly one state must be chosen.
- model.add_exactly_one(not_selected, no_rotation, rotated)
-
- ### Define height and width according to the state.
- dim1 = item_widths[i]
- dim2 = item_heights[i]
- # Unused boxes are fixed at (0.0).
- model.add(x_sizes[i] == 0).only_enforce_if(not_selected)
- model.add(y_sizes[i] == 0).only_enforce_if(not_selected)
- model.add(x_starts[i] == 0).only_enforce_if(not_selected)
- model.add(y_starts[i] == 0).only_enforce_if(not_selected)
- # Sizes are fixed by the rotation.
- model.add(x_sizes[i] == dim1).only_enforce_if(no_rotation)
- model.add(y_sizes[i] == dim2).only_enforce_if(no_rotation)
- model.add(x_sizes[i] == dim2).only_enforce_if(rotated)
- model.add(y_sizes[i] == dim1).only_enforce_if(rotated)
-
- is_used.append(~not_selected)
-
- ## 2D no overlap.
- model.add_no_overlap_2d(x_intervals, y_intervals)
-
- # Objective.
- model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
-
- # Output proto to file.
- if _OUTPUT_PROTO.value:
- print(f"Writing proto to {_OUTPUT_PROTO.value}")
- with open(_OUTPUT_PROTO.value, "w") as text_file:
- text_file.write(str(model))
-
- # solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
-
- status = solver.solve(model)
-
- # Report solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
- data = pd.DataFrame(
- {
- "x_start": [solver.value(x_starts[i]) for i in used],
- "y_start": [solver.value(y_starts[i]) for i in used],
- "item_width": [solver.value(x_sizes[i]) for i in used],
- "item_height": [solver.value(y_sizes[i]) for i in used],
- "x_end": [solver.value(x_ends[i]) for i in used],
- "y_end": [solver.value(y_ends[i]) for i in used],
- "item_value": [item_values[i] for i in used],
- }
- )
- print(data)
+ )
+
+ # is_used[i] == True if and only if item i is selected.
+ is_used = []
+
+ # Constraints.
+
+    ## For each item, decide whether it is not_selected, no_rotation, or rotated.
+ for i in range(num_items):
+ not_selected = model.new_bool_var(f"not_selected_{i}")
+ no_rotation = model.new_bool_var(f"no_rotation_{i}")
+ rotated = model.new_bool_var(f"rotated_{i}")
+
+ ### Exactly one state must be chosen.
+ model.add_exactly_one(not_selected, no_rotation, rotated)
+
+ ### Define height and width according to the state.
+ dim1 = item_widths[i]
+ dim2 = item_heights[i]
+    # Unused boxes are fixed at (0, 0).
+ model.add(x_sizes[i] == 0).only_enforce_if(not_selected)
+ model.add(y_sizes[i] == 0).only_enforce_if(not_selected)
+ model.add(x_starts[i] == 0).only_enforce_if(not_selected)
+ model.add(y_starts[i] == 0).only_enforce_if(not_selected)
+ # Sizes are fixed by the rotation.
+ model.add(x_sizes[i] == dim1).only_enforce_if(no_rotation)
+ model.add(y_sizes[i] == dim2).only_enforce_if(no_rotation)
+ model.add(x_sizes[i] == dim2).only_enforce_if(rotated)
+ model.add(y_sizes[i] == dim1).only_enforce_if(rotated)
+
+ is_used.append(~not_selected)
+
+ ## 2D no overlap.
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+
+ # Objective.
+ model.maximize(cp_model.LinearExpr.weighted_sum(is_used, item_values))
+
+ # Output proto to file.
+ if _OUTPUT_PROTO.value:
+ print(f"Writing proto to {_OUTPUT_PROTO.value}")
+ with open(_OUTPUT_PROTO.value, "w") as text_file:
+ text_file.write(str(model))
+
+ # solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+
+ status = solver.solve(model)
+
+ # Report solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ used = {i for i in range(num_items) if solver.boolean_value(is_used[i])}
+ data = pd.DataFrame({
+ "x_start": [solver.value(x_starts[i]) for i in used],
+ "y_start": [solver.value(y_starts[i]) for i in used],
+ "item_width": [solver.value(x_sizes[i]) for i in used],
+ "item_height": [solver.value(y_sizes[i]) for i in used],
+ "x_end": [solver.value(x_ends[i]) for i in used],
+ "y_end": [solver.value(y_ends[i]) for i in used],
+ "item_value": [item_values[i] for i in used],
+ })
+ print(data)
def main(_):
- """solve the problem with all models."""
- data, max_height, max_width = build_data()
- if _MODEL.value == "duplicate":
- solve_with_duplicate_items(data, max_height, max_width)
- elif _MODEL.value == "optional":
- solve_with_duplicate_optional_items(data, max_height, max_width)
- else:
- solve_with_rotations(data, max_height, max_width)
+    """Solve the problem with all models."""
+ data, max_height, max_width = build_data()
+ if _MODEL.value == "duplicate":
+ solve_with_duplicate_items(data, max_height, max_width)
+ elif _MODEL.value == "optional":
+ solve_with_duplicate_optional_items(data, max_height, max_width)
+ else:
+ solve_with_rotations(data, max_height, max_width)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/line_balancing_sat.py b/examples/python/line_balancing_sat.py
index c80a747d3a0..060bcecd7e0 100644
--- a/examples/python/line_balancing_sat.py
+++ b/examples/python/line_balancing_sat.py
@@ -48,315 +48,315 @@
class SectionInfo:
- """Store problem information for each section of the input file."""
+ """Store problem information for each section of the input file."""
- def __init__(self):
- self.value = None
- self.index_map = {}
- self.set_of_pairs = set()
+ def __init__(self):
+ self.value = None
+ self.index_map = {}
+ self.set_of_pairs = set()
- def __str__(self):
- if self.index_map:
- return f"SectionInfo(index_map={self.index_map})"
- elif self.set_of_pairs:
- return f"SectionInfo(set_of_pairs={self.set_of_pairs})"
- elif self.value is not None:
- return f"SectionInfo(value={self.value})"
- else:
- return "SectionInfo()"
+ def __str__(self):
+ if self.index_map:
+ return f"SectionInfo(index_map={self.index_map})"
+ elif self.set_of_pairs:
+ return f"SectionInfo(set_of_pairs={self.set_of_pairs})"
+ elif self.value is not None:
+ return f"SectionInfo(value={self.value})"
+ else:
+ return "SectionInfo()"
def read_problem(filename: str) -> Dict[str, SectionInfo]:
- """Reads a .alb file and returns the problem."""
+ """Reads a .alb file and returns the problem."""
- current_info = SectionInfo()
+ current_info = SectionInfo()
- problem: Dict[str, SectionInfo] = {}
- with open(filename, "r") as input_file:
- print(f"Reading problem from '{filename}'")
+ problem: Dict[str, SectionInfo] = {}
+ with open(filename, "r") as input_file:
+ print(f"Reading problem from '{filename}'")
- for line in input_file:
- stripped_line = line.strip()
- if not stripped_line:
- continue
+ for line in input_file:
+ stripped_line = line.strip()
+ if not stripped_line:
+ continue
- match_section_def = re.fullmatch(r"<([\w\s]+)>", stripped_line)
- if match_section_def:
- section_name = match_section_def.group(1)
- if section_name == "end":
- continue
+ match_section_def = re.fullmatch(r"<([\w\s]+)>", stripped_line)
+ if match_section_def:
+ section_name = match_section_def.group(1)
+ if section_name == "end":
+ continue
- current_info = SectionInfo()
- problem[section_name] = current_info
- continue
+ current_info = SectionInfo()
+ problem[section_name] = current_info
+ continue
- match_single_number = re.fullmatch(r"^([0-9]+)$", stripped_line)
- if match_single_number:
- current_info.value = int(match_single_number.group(1))
- continue
+ match_single_number = re.fullmatch(r"^([0-9]+)$", stripped_line)
+ if match_single_number:
+ current_info.value = int(match_single_number.group(1))
+ continue
- match_key_value = re.fullmatch(r"^([0-9]+)\s+([0-9]+)$", stripped_line)
- if match_key_value:
- key = int(match_key_value.group(1))
- value = int(match_key_value.group(2))
- current_info.index_map[key] = value
- continue
+ match_key_value = re.fullmatch(r"^([0-9]+)\s+([0-9]+)$", stripped_line)
+ if match_key_value:
+ key = int(match_key_value.group(1))
+ value = int(match_key_value.group(2))
+ current_info.index_map[key] = value
+ continue
- match_pair = re.fullmatch(r"^([0-9]+),([0-9]+)$", stripped_line)
- if match_pair:
- left = int(match_pair.group(1))
- right = int(match_pair.group(2))
- current_info.set_of_pairs.add((left, right))
- continue
+ match_pair = re.fullmatch(r"^([0-9]+),([0-9]+)$", stripped_line)
+ if match_pair:
+ left = int(match_pair.group(1))
+ right = int(match_pair.group(2))
+ current_info.set_of_pairs.add((left, right))
+ continue
- print(f"Unrecognized line '{stripped_line}'")
+ print(f"Unrecognized line '{stripped_line}'")
- return problem
+ return problem
def print_stats(problem: Dict[str, SectionInfo]) -> None:
- print("Problem Statistics")
- for key, value in problem.items():
- print(f" - {key}: {value}")
+ print("Problem Statistics")
+ for key, value in problem.items():
+ print(f" - {key}: {value}")
def solve_problem_greedily(problem: Dict[str, SectionInfo]) -> Dict[int, int]:
- """Compute a greedy solution."""
- print("Solving using a Greedy heuristics")
+ """Compute a greedy solution."""
+    print("Solving using a greedy heuristic")
- num_tasks = problem["number of tasks"].value
- if num_tasks is None:
- return {}
- all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the data.
- precedences = problem["precedence relations"].set_of_pairs
- durations = problem["task times"].index_map
- cycle_time = problem["cycle time"].value
+ num_tasks = problem["number of tasks"].value
+ if num_tasks is None:
+ return {}
+ all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the data.
+ precedences = problem["precedence relations"].set_of_pairs
+ durations = problem["task times"].index_map
+ cycle_time = problem["cycle time"].value
- weights = collections.defaultdict(int)
- successors = collections.defaultdict(list)
+ weights = collections.defaultdict(int)
+ successors = collections.defaultdict(list)
- candidates = set(all_tasks)
+ candidates = set(all_tasks)
- for before, after in precedences:
- weights[after] += 1
- successors[before].append(after)
- if after in candidates:
- candidates.remove(after)
+ for before, after in precedences:
+ weights[after] += 1
+ successors[before].append(after)
+ if after in candidates:
+ candidates.remove(after)
- assignment: Dict[int, int] = {}
- current_pod = 0
- residual_capacity = cycle_time
+ assignment: Dict[int, int] = {}
+ current_pod = 0
+ residual_capacity = cycle_time
- while len(assignment) < num_tasks:
- if not candidates:
- print("error empty")
- break
+ while len(assignment) < num_tasks:
+ if not candidates:
+ print("error empty")
+ break
- best = -1
- best_slack = cycle_time
- best_duration = 0
+ best = -1
+ best_slack = cycle_time
+ best_duration = 0
- for c in candidates:
- duration = durations[c]
- slack = residual_capacity - duration
- if slack < best_slack and slack >= 0:
- best_slack = slack
- best = c
- best_duration = duration
+ for c in candidates:
+ duration = durations[c]
+ slack = residual_capacity - duration
+ if slack < best_slack and slack >= 0:
+ best_slack = slack
+ best = c
+ best_duration = duration
- if best == -1:
- current_pod += 1
- residual_capacity = cycle_time
- continue
+ if best == -1:
+ current_pod += 1
+ residual_capacity = cycle_time
+ continue
- candidates.remove(best)
- assignment[best] = current_pod
- residual_capacity -= best_duration
+ candidates.remove(best)
+ assignment[best] = current_pod
+ residual_capacity -= best_duration
- for succ in successors[best]:
- weights[succ] -= 1
- if weights[succ] == 0:
- candidates.add(succ)
- del weights[succ]
+ for succ in successors[best]:
+ weights[succ] -= 1
+ if weights[succ] == 0:
+ candidates.add(succ)
+ del weights[succ]
- print(f" greedy solution uses {current_pod + 1} pods.")
+ print(f" greedy solution uses {current_pod + 1} pods.")
- return assignment
+ return assignment
def solve_problem_with_boolean_model(
problem: Dict[str, SectionInfo], hint: Dict[int, int]
) -> None:
- """solve the given problem."""
-
- print("Solving using the Boolean model")
- # problem data
- num_tasks = problem["number of tasks"].value
- if num_tasks is None:
- return
- all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the problem.
- durations = problem["task times"].index_map
- precedences = problem["precedence relations"].set_of_pairs
- cycle_time = problem["cycle time"].value
-
- num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks - 1
- all_pods = range(num_pods)
-
- model = cp_model.CpModel()
-
- # assign[t, p] indicates if task t is done on pod p.
- assign = {}
- # possible[t, p] indicates if task t is possible on pod p.
- possible = {}
-
- # Create the variables
- for t in all_tasks:
- for p in all_pods:
- assign[t, p] = model.new_bool_var(f"assign_{t}_{p}")
- possible[t, p] = model.new_bool_var(f"possible_{t}_{p}")
-
- # active[p] indicates if pod p is active.
- active = [model.new_bool_var(f"active_{p}") for p in all_pods]
-
- # Each task is done on exactly one pod.
- for t in all_tasks:
- model.add_exactly_one([assign[t, p] for p in all_pods])
-
- # Total tasks assigned to one pod cannot exceed cycle time.
+    """Solve the given problem."""
+
+ print("Solving using the Boolean model")
+ # problem data
+ num_tasks = problem["number of tasks"].value
+ if num_tasks is None:
+ return
+ all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the problem.
+ durations = problem["task times"].index_map
+ precedences = problem["precedence relations"].set_of_pairs
+ cycle_time = problem["cycle time"].value
+
+ num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks - 1
+ all_pods = range(num_pods)
+
+ model = cp_model.CpModel()
+
+ # assign[t, p] indicates if task t is done on pod p.
+ assign = {}
+ # possible[t, p] indicates if task t is possible on pod p.
+ possible = {}
+
+ # Create the variables
+ for t in all_tasks:
for p in all_pods:
- model.add(sum(assign[t, p] * durations[t] for t in all_tasks) <= cycle_time)
+ assign[t, p] = model.new_bool_var(f"assign_{t}_{p}")
+ possible[t, p] = model.new_bool_var(f"possible_{t}_{p}")
- # Maintain the possible variables:
- # possible at pod p -> possible at any pod after p
- for t in all_tasks:
- for p in range(num_pods - 1):
- model.add_implication(possible[t, p], possible[t, p + 1])
+ # active[p] indicates if pod p is active.
+ active = [model.new_bool_var(f"active_{p}") for p in all_pods]
- # Link possible and active variables.
- for t in all_tasks:
- for p in all_pods:
- model.add_implication(assign[t, p], possible[t, p])
- if p > 1:
- model.add_implication(assign[t, p], ~possible[t, p - 1])
+ # Each task is done on exactly one pod.
+ for t in all_tasks:
+ model.add_exactly_one([assign[t, p] for p in all_pods])
+
+ # Total tasks assigned to one pod cannot exceed cycle time.
+ for p in all_pods:
+ model.add(sum(assign[t, p] * durations[t] for t in all_tasks) <= cycle_time)
- # Precedences.
- for before, after in precedences:
- for p in range(1, num_pods):
- model.add_implication(assign[before, p], ~possible[after, p - 1])
+ # Maintain the possible variables:
+ # possible at pod p -> possible at any pod after p
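+    # possible[t, p] encodes "task t is placed on pod p or earlier"; the implications
+    # below tie it to assign[t, p], and the precedence implications then force each
+    # successor task onto the same pod as its predecessor or a later one.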
+ for t in all_tasks:
+ for p in range(num_pods - 1):
+ model.add_implication(possible[t, p], possible[t, p + 1])
- # Link active variables with the assign one.
+ # Link possible and active variables.
+ for t in all_tasks:
for p in all_pods:
- all_assign_vars = [assign[t, p] for t in all_tasks]
- for a in all_assign_vars:
- model.add_implication(a, active[p])
- model.add_bool_or(all_assign_vars + [~active[p]])
+ model.add_implication(assign[t, p], possible[t, p])
+ if p > 1:
+ model.add_implication(assign[t, p], ~possible[t, p - 1])
- # Force pods to be contiguous. This is critical to get good lower bounds
- # on the objective, even if it makes feasibility harder.
+ # Precedences.
+ for before, after in precedences:
for p in range(1, num_pods):
- model.add_implication(~active[p - 1], ~active[p])
- for t in all_tasks:
- model.add_implication(~active[p], possible[t, p - 1])
+ model.add_implication(assign[before, p], ~possible[after, p - 1])
+
+ # Link active variables with the assign one.
+ for p in all_pods:
+ all_assign_vars = [assign[t, p] for t in all_tasks]
+ for a in all_assign_vars:
+ model.add_implication(a, active[p])
+ model.add_bool_or(all_assign_vars + [~active[p]])
+
+ # Force pods to be contiguous. This is critical to get good lower bounds
+ # on the objective, even if it makes feasibility harder.
+ for p in range(1, num_pods):
+ model.add_implication(~active[p - 1], ~active[p])
+ for t in all_tasks:
+ model.add_implication(~active[p], possible[t, p - 1])
- # Objective.
- model.minimize(sum(active))
+ # Objective.
+ model.minimize(sum(active))
- # add search hinting from the greedy solution.
- for t in all_tasks:
- model.add_hint(assign[t, hint[t]], 1)
+ # add search hinting from the greedy solution.
+ for t in all_tasks:
+ model.add_hint(assign[t, hint[t]], 1)
- if _OUTPUT_PROTO.value:
- print(f"Writing proto to {_OUTPUT_PROTO.value}")
- model.export_to_file(_OUTPUT_PROTO.value)
+ if _OUTPUT_PROTO.value:
+ print(f"Writing proto to {_OUTPUT_PROTO.value}")
+ model.export_to_file(_OUTPUT_PROTO.value)
- # solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- solver.parameters.log_search_progress = True
- solver.solve(model)
+ # solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ solver.parameters.log_search_progress = True
+ solver.solve(model)
def solve_problem_with_scheduling_model(
problem: Dict[str, SectionInfo], hint: Dict[int, int]
) -> None:
- """solve the given problem using a cumulative model."""
-
- print("Solving using the scheduling model")
- # Problem data
- num_tasks = problem["number of tasks"].value
- if num_tasks is None:
- return
- all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the data.
- durations = problem["task times"].index_map
- precedences = problem["precedence relations"].set_of_pairs
- cycle_time = problem["cycle time"].value
-
- num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks
-
- model = cp_model.CpModel()
-
- # pod[t] indicates on which pod the task is performed.
- pods = {}
- for t in all_tasks:
- pods[t] = model.new_int_var(0, num_pods - 1, f"pod_{t}")
-
- # Create the variables
- intervals = []
- demands = []
- for t in all_tasks:
- interval = model.new_fixed_size_interval_var(pods[t], 1, "")
- intervals.append(interval)
- demands.append(durations[t])
-
- # add terminating interval as the objective.
- obj_var = model.new_int_var(1, num_pods, "obj_var")
- obj_size = model.new_int_var(1, num_pods, "obj_duration")
- obj_interval = model.new_interval_var(
- obj_var, obj_size, num_pods + 1, "obj_interval"
- )
- intervals.append(obj_interval)
- demands.append(cycle_time)
-
- # Cumulative constraint.
- model.add_cumulative(intervals, demands, cycle_time)
-
- # Precedences.
- for before, after in precedences:
- model.add(pods[after] >= pods[before])
-
- # Objective.
- model.minimize(obj_var)
-
- # add search hinting from the greedy solution.
- for t in all_tasks:
- model.add_hint(pods[t], hint[t])
-
- if _OUTPUT_PROTO.value:
- print(f"Writing proto to{_OUTPUT_PROTO.value}")
- model.export_to_file(_OUTPUT_PROTO.value)
-
- # solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- solver.parameters.log_search_progress = True
- solver.solve(model)
+    """Solve the given problem using a cumulative model."""
+
+ print("Solving using the scheduling model")
+ # Problem data
+ num_tasks = problem["number of tasks"].value
+ if num_tasks is None:
+ return
+ all_tasks = range(1, num_tasks + 1) # Tasks are 1 based in the data.
+ durations = problem["task times"].index_map
+ precedences = problem["precedence relations"].set_of_pairs
+ cycle_time = problem["cycle time"].value
+
+ num_pods = max(p for _, p in hint.items()) + 1 if hint else num_tasks
+
+ model = cp_model.CpModel()
+
+ # pod[t] indicates on which pod the task is performed.
+ pods = {}
+ for t in all_tasks:
+ pods[t] = model.new_int_var(0, num_pods - 1, f"pod_{t}")
+
+ # Create the variables
+ intervals = []
+ demands = []
+ for t in all_tasks:
+ interval = model.new_fixed_size_interval_var(pods[t], 1, "")
+ intervals.append(interval)
+ demands.append(durations[t])
+
+ # add terminating interval as the objective.
+ obj_var = model.new_int_var(1, num_pods, "obj_var")
+ obj_size = model.new_int_var(1, num_pods, "obj_duration")
+ obj_interval = model.new_interval_var(
+ obj_var, obj_size, num_pods + 1, "obj_interval"
+ )
+ intervals.append(obj_interval)
+ demands.append(cycle_time)
+
+ # Cumulative constraint.
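+    # Pods are modeled as unit-length time slots on a cumulative resource of capacity
+    # cycle_time, so the durations of the tasks placed on one pod cannot exceed the
+    # cycle time. The objective interval spans [obj_var, num_pods + 1) with demand
+    # cycle_time, blocking every pod from obj_var on; minimizing obj_var therefore
+    # minimizes the number of pods that can actually be used.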
+ model.add_cumulative(intervals, demands, cycle_time)
+
+ # Precedences.
+ for before, after in precedences:
+ model.add(pods[after] >= pods[before])
+
+ # Objective.
+ model.minimize(obj_var)
+
+ # add search hinting from the greedy solution.
+ for t in all_tasks:
+ model.add_hint(pods[t], hint[t])
+
+ if _OUTPUT_PROTO.value:
+        print(f"Writing proto to {_OUTPUT_PROTO.value}")
+ model.export_to_file(_OUTPUT_PROTO.value)
+
+ # solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ solver.parameters.log_search_progress = True
+ solver.solve(model)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
- problem = read_problem(_INPUT.value)
- print_stats(problem)
- greedy_solution = solve_problem_greedily(problem)
+ problem = read_problem(_INPUT.value)
+ print_stats(problem)
+ greedy_solution = solve_problem_greedily(problem)
- if _MODEL.value == "boolean":
- solve_problem_with_boolean_model(problem, greedy_solution)
- elif _MODEL.value == "scheduling":
- solve_problem_with_scheduling_model(problem, greedy_solution)
+ if _MODEL.value == "boolean":
+ solve_problem_with_boolean_model(problem, greedy_solution)
+ elif _MODEL.value == "scheduling":
+ solve_problem_with_scheduling_model(problem, greedy_solution)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/linear_assignment_api.py b/examples/python/linear_assignment_api.py
index 34514974e4b..d569e7fab10 100644
--- a/examples/python/linear_assignment_api.py
+++ b/examples/python/linear_assignment_api.py
@@ -14,9 +14,9 @@
"""Test linear sum assignment on a 4x4 matrix.
- Example taken from:
- http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm with kCost[0][1]
- modified so the optimum solution is unique.
+Example taken from:
+http://www.ee.oulu.fi/~mpa/matreng/eem1_2-1.htm with kCost[0][1]
+modified so the optimum solution is unique.
"""
from typing import Sequence
@@ -25,37 +25,42 @@
def run_assignment_on_4x4_matrix():
- """Test linear sum assignment on a 4x4 matrix."""
- num_sources = 4
- num_targets = 4
- cost = [[90, 76, 75, 80], [35, 85, 55, 65], [125, 95, 90, 105], [45, 110, 95, 115]]
- expected_cost = cost[0][3] + cost[1][2] + cost[2][1] + cost[3][0]
-
- assignment = linear_sum_assignment.SimpleLinearSumAssignment()
- for source in range(0, num_sources):
- for target in range(0, num_targets):
- assignment.add_arc_with_cost(source, target, cost[source][target])
-
- solve_status = assignment.solve()
- if solve_status == assignment.OPTIMAL:
- print("Successful solve.")
- print("Total cost", assignment.optimal_cost(), "/", expected_cost)
- for i in range(0, assignment.num_nodes()):
- print(
- "Left node %d assigned to right node %d with cost %d."
- % (i, assignment.right_mate(i), assignment.assignment_cost(i))
- )
- elif solve_status == assignment.INFEASIBLE:
- print("No perfect matching exists.")
- elif solve_status == assignment.POSSIBLE_OVERFLOW:
- print("Some input costs are too large and may cause an integer overflow.")
+ """Test linear sum assignment on a 4x4 matrix."""
+ num_sources = 4
+ num_targets = 4
+ cost = [
+ [90, 76, 75, 80],
+ [35, 85, 55, 65],
+ [125, 95, 90, 105],
+ [45, 110, 95, 115],
+ ]
+ expected_cost = cost[0][3] + cost[1][2] + cost[2][1] + cost[3][0]
+
+ assignment = linear_sum_assignment.SimpleLinearSumAssignment()
+ for source in range(0, num_sources):
+ for target in range(0, num_targets):
+ assignment.add_arc_with_cost(source, target, cost[source][target])
+
+ solve_status = assignment.solve()
+ if solve_status == assignment.OPTIMAL:
+ print("Successful solve.")
+ print("Total cost", assignment.optimal_cost(), "/", expected_cost)
+ for i in range(0, assignment.num_nodes()):
+ print(
+ "Left node %d assigned to right node %d with cost %d."
+ % (i, assignment.right_mate(i), assignment.assignment_cost(i))
+ )
+ elif solve_status == assignment.INFEASIBLE:
+ print("No perfect matching exists.")
+ elif solve_status == assignment.POSSIBLE_OVERFLOW:
+ print("Some input costs are too large and may cause an integer overflow.")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- run_assignment_on_4x4_matrix()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ run_assignment_on_4x4_matrix()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/linear_programming.py b/examples/python/linear_programming.py
index a29847ce596..00e285b7cec 100644
--- a/examples/python/linear_programming.py
+++ b/examples/python/linear_programming.py
@@ -18,134 +18,136 @@
def Announce(solver, api_type):
- print(
- "---- Linear programming example with " + solver + " (" + api_type + ") -----"
- )
+ print(
+ "---- Linear programming example with "
+ + solver
+ + " ("
+ + api_type
+ + ") -----"
+ )
def RunLinearExampleNaturalLanguageAPI(optimization_problem_type):
- """Example of simple linear program with natural language API."""
- solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
+ """Example of simple linear program with natural language API."""
+ solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
- if not solver:
- return
+ if not solver:
+ return
- Announce(optimization_problem_type, "natural language API")
+ Announce(optimization_problem_type, "natural language API")
- infinity = solver.infinity()
- # x1, x2 and x3 are continuous non-negative variables.
- x1 = solver.NumVar(0.0, infinity, "x1")
- x2 = solver.NumVar(0.0, infinity, "x2")
- x3 = solver.NumVar(0.0, infinity, "x3")
+ infinity = solver.infinity()
+ # x1, x2 and x3 are continuous non-negative variables.
+ x1 = solver.NumVar(0.0, infinity, "x1")
+ x2 = solver.NumVar(0.0, infinity, "x2")
+ x3 = solver.NumVar(0.0, infinity, "x3")
- solver.Maximize(10 * x1 + 6 * x2 + 4 * x3)
- c0 = solver.Add(10 * x1 + 4 * x2 + 5 * x3 <= 600, "ConstraintName0")
- c1 = solver.Add(2 * x1 + 2 * x2 + 6 * x3 <= 300)
- sum_of_vars = sum([x1, x2, x3])
- c2 = solver.Add(sum_of_vars <= 100.0, "OtherConstraintName")
+ solver.Maximize(10 * x1 + 6 * x2 + 4 * x3)
+ c0 = solver.Add(10 * x1 + 4 * x2 + 5 * x3 <= 600, "ConstraintName0")
+ c1 = solver.Add(2 * x1 + 2 * x2 + 6 * x3 <= 300)
+ sum_of_vars = sum([x1, x2, x3])
+ c2 = solver.Add(sum_of_vars <= 100.0, "OtherConstraintName")
- SolveAndPrint(
- solver, [x1, x2, x3], [c0, c1, c2], optimization_problem_type != "PDLP"
- )
- # Print a linear expression's solution value.
- print("Sum of vars: %s = %s" % (sum_of_vars, sum_of_vars.solution_value()))
+ SolveAndPrint(
+ solver, [x1, x2, x3], [c0, c1, c2], optimization_problem_type != "PDLP"
+ )
+ # Print a linear expression's solution value.
+ print("Sum of vars: %s = %s" % (sum_of_vars, sum_of_vars.solution_value()))
def RunLinearExampleCppStyleAPI(optimization_problem_type):
- """Example of simple linear program with the C++ style API."""
- solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
- if not solver:
- return
-
- Announce(optimization_problem_type, "C++ style API")
-
- infinity = solver.infinity()
- # x1, x2 and x3 are continuous non-negative variables.
- x1 = solver.NumVar(0.0, infinity, "x1")
- x2 = solver.NumVar(0.0, infinity, "x2")
- x3 = solver.NumVar(0.0, infinity, "x3")
-
- # Maximize 10 * x1 + 6 * x2 + 4 * x3.
- objective = solver.Objective()
- objective.SetCoefficient(x1, 10)
- objective.SetCoefficient(x2, 6)
- objective.SetCoefficient(x3, 4)
- objective.SetMaximization()
-
- # x1 + x2 + x3 <= 100.
- c0 = solver.Constraint(-infinity, 100.0, "c0")
- c0.SetCoefficient(x1, 1)
- c0.SetCoefficient(x2, 1)
- c0.SetCoefficient(x3, 1)
-
- # 10 * x1 + 4 * x2 + 5 * x3 <= 600.
- c1 = solver.Constraint(-infinity, 600.0, "c1")
- c1.SetCoefficient(x1, 10)
- c1.SetCoefficient(x2, 4)
- c1.SetCoefficient(x3, 5)
-
- # 2 * x1 + 2 * x2 + 6 * x3 <= 300.
- c2 = solver.Constraint(-infinity, 300.0, "c2")
- c2.SetCoefficient(x1, 2)
- c2.SetCoefficient(x2, 2)
- c2.SetCoefficient(x3, 6)
-
- SolveAndPrint(
- solver, [x1, x2, x3], [c0, c1, c2], optimization_problem_type != "PDLP"
- )
+ """Example of simple linear program with the C++ style API."""
+ solver = pywraplp.Solver.CreateSolver(optimization_problem_type)
+ if not solver:
+ return
+
+ Announce(optimization_problem_type, "C++ style API")
+
+ infinity = solver.infinity()
+ # x1, x2 and x3 are continuous non-negative variables.
+ x1 = solver.NumVar(0.0, infinity, "x1")
+ x2 = solver.NumVar(0.0, infinity, "x2")
+ x3 = solver.NumVar(0.0, infinity, "x3")
+
+ # Maximize 10 * x1 + 6 * x2 + 4 * x3.
+ objective = solver.Objective()
+ objective.SetCoefficient(x1, 10)
+ objective.SetCoefficient(x2, 6)
+ objective.SetCoefficient(x3, 4)
+ objective.SetMaximization()
+
+ # x1 + x2 + x3 <= 100.
+ c0 = solver.Constraint(-infinity, 100.0, "c0")
+ c0.SetCoefficient(x1, 1)
+ c0.SetCoefficient(x2, 1)
+ c0.SetCoefficient(x3, 1)
+
+ # 10 * x1 + 4 * x2 + 5 * x3 <= 600.
+ c1 = solver.Constraint(-infinity, 600.0, "c1")
+ c1.SetCoefficient(x1, 10)
+ c1.SetCoefficient(x2, 4)
+ c1.SetCoefficient(x3, 5)
+
+ # 2 * x1 + 2 * x2 + 6 * x3 <= 300.
+ c2 = solver.Constraint(-infinity, 300.0, "c2")
+ c2.SetCoefficient(x1, 2)
+ c2.SetCoefficient(x2, 2)
+ c2.SetCoefficient(x3, 6)
+
+ SolveAndPrint(
+ solver, [x1, x2, x3], [c0, c1, c2], optimization_problem_type != "PDLP"
+ )
def SolveAndPrint(solver, variable_list, constraint_list, is_precise):
- """Solve the problem and print the solution."""
- print("Number of variables = %d" % solver.NumVariables())
- print("Number of constraints = %d" % solver.NumConstraints())
+ """Solve the problem and print the solution."""
+ print("Number of variables = %d" % solver.NumVariables())
+ print("Number of constraints = %d" % solver.NumConstraints())
- result_status = solver.Solve()
+ result_status = solver.Solve()
- # The problem has an optimal solution.
- assert result_status == pywraplp.Solver.OPTIMAL
+ # The problem has an optimal solution.
+ assert result_status == pywraplp.Solver.OPTIMAL
- # The solution looks legit (when using solvers others than
- # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
- if is_precise:
- assert solver.VerifySolution(1e-7, True)
+  # The solution looks legit (when using solvers other than
+  # GLOP_LINEAR_PROGRAMMING, verifying the solution is highly recommended!).
+ if is_precise:
+ assert solver.VerifySolution(1e-7, True)
- print("Problem solved in %f milliseconds" % solver.wall_time())
+ print("Problem solved in %f milliseconds" % solver.wall_time())
- # The objective value of the solution.
- print("Optimal objective value = %f" % solver.Objective().Value())
+ # The objective value of the solution.
+ print("Optimal objective value = %f" % solver.Objective().Value())
- # The value of each variable in the solution.
- for variable in variable_list:
- print("%s = %f" % (variable.name(), variable.solution_value()))
+ # The value of each variable in the solution.
+ for variable in variable_list:
+ print("%s = %f" % (variable.name(), variable.solution_value()))
- print("Advanced usage:")
- print("Problem solved in %d iterations" % solver.iterations())
- for variable in variable_list:
- print("%s: reduced cost = %f" % (variable.name(), variable.reduced_cost()))
- activities = solver.ComputeConstraintActivities()
- for i, constraint in enumerate(constraint_list):
- print(
- (
- "constraint %d: dual value = %f\n activity = %f"
- % (i, constraint.dual_value(), activities[constraint.index()])
- )
- )
+ print("Advanced usage:")
+ print("Problem solved in %d iterations" % solver.iterations())
+ for variable in variable_list:
+ print("%s: reduced cost = %f" % (variable.name(), variable.reduced_cost()))
+ activities = solver.ComputeConstraintActivities()
+ for i, constraint in enumerate(constraint_list):
+ print((
+ "constraint %d: dual value = %f\n activity = %f"
+ % (i, constraint.dual_value(), activities[constraint.index()])
+ ))
def main():
- RunLinearExampleNaturalLanguageAPI("GLOP")
- RunLinearExampleNaturalLanguageAPI("GLPK_LP")
- RunLinearExampleNaturalLanguageAPI("CLP")
- RunLinearExampleNaturalLanguageAPI("PDLP")
- RunLinearExampleNaturalLanguageAPI("XPRESS_LP")
+ RunLinearExampleNaturalLanguageAPI("GLOP")
+ RunLinearExampleNaturalLanguageAPI("GLPK_LP")
+ RunLinearExampleNaturalLanguageAPI("CLP")
+ RunLinearExampleNaturalLanguageAPI("PDLP")
+ RunLinearExampleNaturalLanguageAPI("XPRESS_LP")
- RunLinearExampleCppStyleAPI("GLOP")
- RunLinearExampleCppStyleAPI("GLPK_LP")
- RunLinearExampleCppStyleAPI("CLP")
- RunLinearExampleCppStyleAPI("PDLP")
- RunLinearExampleCppStyleAPI("XPRESS_LP")
+ RunLinearExampleCppStyleAPI("GLOP")
+ RunLinearExampleCppStyleAPI("GLPK_LP")
+ RunLinearExampleCppStyleAPI("CLP")
+ RunLinearExampleCppStyleAPI("PDLP")
+ RunLinearExampleCppStyleAPI("XPRESS_LP")
if __name__ == "__main__":
- main()
+ main()
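For readers who want to run the model above without installing every back-end it loops over, here is a minimal sketch restricted to GLOP (bundled with OR-Tools), using only the pywraplp calls that already appear in this example:

```python
from ortools.linear_solver import pywraplp

solver = pywraplp.Solver.CreateSolver("GLOP")
infinity = solver.infinity()
x1 = solver.NumVar(0.0, infinity, "x1")
x2 = solver.NumVar(0.0, infinity, "x2")
x3 = solver.NumVar(0.0, infinity, "x3")

# Same model as the example: maximize 10*x1 + 6*x2 + 4*x3.
solver.Maximize(10 * x1 + 6 * x2 + 4 * x3)
solver.Add(x1 + x2 + x3 <= 100)
solver.Add(10 * x1 + 4 * x2 + 5 * x3 <= 600)
solver.Add(2 * x1 + 2 * x2 + 6 * x3 <= 300)

if solver.Solve() == pywraplp.Solver.OPTIMAL:
    print("objective =", solver.Objective().Value())
    for var in (x1, x2, x3):
        print(var.name(), "=", var.solution_value())
```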
diff --git a/examples/python/magic_sequence_distribute.py b/examples/python/magic_sequence_distribute.py
index ebfc8212ee2..ea8174badfd 100755
--- a/examples/python/magic_sequence_distribute.py
+++ b/examples/python/magic_sequence_distribute.py
@@ -30,28 +30,30 @@
def main(argv):
- # Create the solver.
- solver = pywrapcp.Solver("magic sequence")
+ # Create the solver.
+ solver = pywrapcp.Solver("magic sequence")
- # Create an array of IntVars to hold the answers.
- size = int(argv[1]) if len(argv) > 1 else 100
- all_values = list(range(0, size))
- all_vars = [solver.IntVar(0, size, "vars_%d" % i) for i in all_values]
+ # Create an array of IntVars to hold the answers.
+ size = int(argv[1]) if len(argv) > 1 else 100
+ all_values = list(range(0, size))
+ all_vars = [solver.IntVar(0, size, "vars_%d" % i) for i in all_values]
- # The number of variables equal to j shall be the value of all_vars[j].
- solver.Add(solver.Distribute(all_vars, all_values, all_vars))
+ # The number of variables equal to j shall be the value of all_vars[j].
+ solver.Add(solver.Distribute(all_vars, all_values, all_vars))
- # The sum of all the values shall be equal to the size.
- # (This constraint is redundant, but speeds up the search.)
- solver.Add(solver.Sum(all_vars) == size)
+ # The sum of all the values shall be equal to the size.
+ # (This constraint is redundant, but speeds up the search.)
+ solver.Add(solver.Sum(all_vars) == size)
- solver.NewSearch(
- solver.Phase(all_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- )
- solver.NextSolution()
- print(all_vars)
- solver.EndSearch()
+ solver.NewSearch(
+ solver.Phase(
+ all_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ )
+ solver.NextSolution()
+ print(all_vars)
+ solver.EndSearch()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
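The Distribute constraint above encodes the defining property of a magic sequence: entry j equals the number of occurrences of the value j. A hypothetical pure-Python checker for a candidate sequence, handy for verifying printed solutions:

```python
def is_magic_sequence(seq):
    # seq[j] must equal the number of occurrences of the value j in seq.
    return all(seq[j] == seq.count(j) for j in range(len(seq)))

print(is_magic_sequence([2, 1, 2, 0, 0]))  # True: the classic size-5 solution
```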
diff --git a/examples/python/maximize_combinations_sat.py b/examples/python/maximize_combinations_sat.py
index a23e90d3846..a36a34a75c7 100644
--- a/examples/python/maximize_combinations_sat.py
+++ b/examples/python/maximize_combinations_sat.py
@@ -21,55 +21,55 @@
def maximize_combinations_sat() -> None:
- """Maximize the number of valid combinations of Boolean variables."""
- model = cp_model.CpModel()
- cards: list[cp_model.IntVar] = [
- model.new_bool_var("card1"),
- model.new_bool_var("card2"),
- model.new_bool_var("card3"),
- model.new_bool_var("card4"),
- ]
-
- combos: list[list[cp_model.IntVar]] = [
- [cards[0], cards[1]],
- [cards[0], cards[2]],
- [cards[1], cards[3]],
- [cards[0], cards[2], cards[3]],
- ]
-
- deck_size: int = 3
- model.add(sum(cards) == deck_size)
-
- valid_combos: list[cp_model.IntVar] = []
- for combination in combos:
- is_valid = model.new_bool_var("")
-
- # All true implies is_valid.
- model.add_bool_and(is_valid).only_enforce_if(combination)
-
- # is_valid implies all true.
- for literal in combination:
- model.add_implication(is_valid, literal)
- valid_combos.append(is_valid)
-
- model.maximize(sum(valid_combos))
-
- solver = cp_model.CpSolver()
- solver.parameters.log_search_progress = True
- status = solver.solve(model)
-
- if status == cp_model.OPTIMAL:
- print(
- "chosen cards:",
- [card.name for card in cards if solver.boolean_value(card)],
- )
+ """Maximize the number of valid combinations of Boolean variables."""
+ model = cp_model.CpModel()
+ cards: list[cp_model.IntVar] = [
+ model.new_bool_var("card1"),
+ model.new_bool_var("card2"),
+ model.new_bool_var("card3"),
+ model.new_bool_var("card4"),
+ ]
+
+ combos: list[list[cp_model.IntVar]] = [
+ [cards[0], cards[1]],
+ [cards[0], cards[2]],
+ [cards[1], cards[3]],
+ [cards[0], cards[2], cards[3]],
+ ]
+
+ deck_size: int = 3
+ model.add(sum(cards) == deck_size)
+
+ valid_combos: list[cp_model.IntVar] = []
+ for combination in combos:
+ is_valid = model.new_bool_var("")
+
+ # All true implies is_valid.
+ model.add_bool_and(is_valid).only_enforce_if(combination)
+
+ # is_valid implies all true.
+ for literal in combination:
+ model.add_implication(is_valid, literal)
+ valid_combos.append(is_valid)
+
+ model.maximize(sum(valid_combos))
+
+ solver = cp_model.CpSolver()
+ solver.parameters.log_search_progress = True
+ status = solver.solve(model)
+
+ if status == cp_model.OPTIMAL:
+ print(
+ "chosen cards:",
+ [card.name for card in cards if solver.boolean_value(card)],
+ )
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- maximize_combinations_sat()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ maximize_combinations_sat()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
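The two constraints in the loop above fully reify is_valid as the conjunction of the combination's literals. The same pattern in isolation, as a sketch using the CP-SAT calls already present in this example:

```python
from ortools.sat.python import cp_model

model = cp_model.CpModel()
x = model.new_bool_var("x")
y = model.new_bool_var("y")
b = model.new_bool_var("b")

# x and y both true  =>  b true.
model.add_bool_and(b).only_enforce_if([x, y])
# b true  =>  x and y both true.
model.add_implication(b, x)
model.add_implication(b, y)
```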
diff --git a/examples/python/maze_escape_sat.py b/examples/python/maze_escape_sat.py
index 7a96e453d83..1f8761a3dd4 100644
--- a/examples/python/maze_escape_sat.py
+++ b/examples/python/maze_escape_sat.py
@@ -51,125 +51,125 @@ def add_neighbor(
position_to_rank: Dict[Tuple[int, int, int], cp_model.IntVar],
arcs: list[Tuple[int, int, cp_model.LiteralT]],
) -> None:
- """Checks if the neighbor is valid, and adds it to the model."""
- if (
- x + dx < 0
- or x + dx >= size
- or y + dy < 0
- or y + dy >= size
- or z + dz < 0
- or z + dz >= size
- ):
- return
- before_index = index_map[(x, y, z)]
- before_rank = position_to_rank[(x, y, z)]
- after_index = index_map[(x + dx, y + dy, z + dz)]
- after_rank = position_to_rank[(x + dx, y + dy, z + dz)]
- move_literal = model.new_bool_var("")
- model.add(after_rank == before_rank + 1).only_enforce_if(move_literal)
- arcs.append((before_index, after_index, move_literal))
+ """Checks if the neighbor is valid, and adds it to the model."""
+ if (
+ x + dx < 0
+ or x + dx >= size
+ or y + dy < 0
+ or y + dy >= size
+ or z + dz < 0
+ or z + dz >= size
+ ):
+ return
+ before_index = index_map[(x, y, z)]
+ before_rank = position_to_rank[(x, y, z)]
+ after_index = index_map[(x + dx, y + dy, z + dz)]
+ after_rank = position_to_rank[(x + dx, y + dy, z + dz)]
+ move_literal = model.new_bool_var("")
+ model.add(after_rank == before_rank + 1).only_enforce_if(move_literal)
+ arcs.append((before_index, after_index, move_literal))
def escape_the_maze(params: str, output_proto: str) -> None:
- """Escapes the maze."""
- size = 4
- boxes = [(0, 1, 0), (2, 0, 1), (1, 3, 1), (3, 1, 3)]
- start = (3, 3, 0)
- end = (1, 0, 0)
-
- # Builds a map between each position in the grid and a unique integer between
- # 0 and size^3 - 1.
- index_map = {}
- reverse_map = []
- counter = 0
+ """Escapes the maze."""
+ size = 4
+ boxes = [(0, 1, 0), (2, 0, 1), (1, 3, 1), (3, 1, 3)]
+ start = (3, 3, 0)
+ end = (1, 0, 0)
+
+ # Builds a map between each position in the grid and a unique integer between
+ # 0 and size^3 - 1.
+ index_map = {}
+ reverse_map = []
+ counter = 0
+ for x in range(size):
+ for y in range(size):
+ for z in range(size):
+ index_map[(x, y, z)] = counter
+ reverse_map.append((x, y, z))
+ counter += 1
+
+ # Starts building the model.
+ model = cp_model.CpModel()
+ position_to_rank = {}
+
+ for coord in reverse_map:
+ position_to_rank[coord] = model.new_int_var(0, counter - 1, f"rank_{coord}")
+
+ # Path constraints.
+ model.add(position_to_rank[start] == 0)
+ model.add(position_to_rank[end] == counter - 1)
+ for i in range(len(boxes) - 1):
+ model.add(position_to_rank[boxes[i]] < position_to_rank[boxes[i + 1]])
+
+  # Circuit constraint: visits all blocks exactly once and maintains the rank
+  # of each block.
+ arcs: list[Tuple[int, int, cp_model.LiteralT]] = []
+ for x in range(size):
+ for y in range(size):
+ for z in range(size):
+ add_neighbor(
+ size, x, y, z, -1, 0, 0, model, index_map, position_to_rank, arcs
+ )
+ add_neighbor(
+ size, x, y, z, 1, 0, 0, model, index_map, position_to_rank, arcs
+ )
+ add_neighbor(
+ size, x, y, z, 0, -1, 0, model, index_map, position_to_rank, arcs
+ )
+ add_neighbor(
+ size, x, y, z, 0, 1, 0, model, index_map, position_to_rank, arcs
+ )
+ add_neighbor(
+ size, x, y, z, 0, 0, -1, model, index_map, position_to_rank, arcs
+ )
+ add_neighbor(
+ size, x, y, z, 0, 0, 1, model, index_map, position_to_rank, arcs
+ )
+
+ # Closes the loop as the constraint expects a circuit, not a path.
+ arcs.append((index_map[end], index_map[start], True))
+
+  # Adds the circuit (Hamiltonian path) constraint.
+ model.add_circuit(arcs)
+
+ # Exports the model if required.
+ if output_proto:
+ model.export_to_file(output_proto)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ solver.parameters.log_search_progress = True
+ result = solver.solve(model)
+
+ # Prints solution.
+ if result == cp_model.OPTIMAL:
+ path = [""] * counter
for x in range(size):
- for y in range(size):
- for z in range(size):
- index_map[(x, y, z)] = counter
- reverse_map.append((x, y, z))
- counter += 1
-
- # Starts building the model.
- model = cp_model.CpModel()
- position_to_rank = {}
-
- for coord in reverse_map:
- position_to_rank[coord] = model.new_int_var(0, counter - 1, f"rank_{coord}")
-
- # Path constraints.
- model.add(position_to_rank[start] == 0)
- model.add(position_to_rank[end] == counter - 1)
- for i in range(len(boxes) - 1):
- model.add(position_to_rank[boxes[i]] < position_to_rank[boxes[i + 1]])
-
- # Circuit constraint: visit all blocks exactly once, and maintains the rank
- # of each block.
- arcs: list[Tuple[int, int, cp_model.LiteralT]] = []
- for x in range(size):
- for y in range(size):
- for z in range(size):
- add_neighbor(
- size, x, y, z, -1, 0, 0, model, index_map, position_to_rank, arcs
- )
- add_neighbor(
- size, x, y, z, 1, 0, 0, model, index_map, position_to_rank, arcs
- )
- add_neighbor(
- size, x, y, z, 0, -1, 0, model, index_map, position_to_rank, arcs
- )
- add_neighbor(
- size, x, y, z, 0, 1, 0, model, index_map, position_to_rank, arcs
- )
- add_neighbor(
- size, x, y, z, 0, 0, -1, model, index_map, position_to_rank, arcs
- )
- add_neighbor(
- size, x, y, z, 0, 0, 1, model, index_map, position_to_rank, arcs
- )
-
- # Closes the loop as the constraint expects a circuit, not a path.
- arcs.append((index_map[end], index_map[start], True))
-
- # Adds the circuit (hamiltonian path) constraint.
- model.add_circuit(arcs)
-
- # Exports the model if required.
- if output_proto:
- model.export_to_file(output_proto)
-
- # Solve model.
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- solver.parameters.log_search_progress = True
- result = solver.solve(model)
-
- # Prints solution.
- if result == cp_model.OPTIMAL:
- path = [""] * counter
- for x in range(size):
- for y in range(size):
- for z in range(size):
- position = (x, y, z)
- rank = solver.value(position_to_rank[position])
- msg = f"({x}, {y}, {z})"
- if position == start:
- msg += " [start]"
- elif position == end:
- msg += " [end]"
- else:
- for b, box in enumerate(boxes):
- if position == box:
- msg += f" [boxes {b}]"
- path[rank] = msg
- print(path)
+ for y in range(size):
+ for z in range(size):
+ position = (x, y, z)
+ rank = solver.value(position_to_rank[position])
+ msg = f"({x}, {y}, {z})"
+ if position == start:
+ msg += " [start]"
+ elif position == end:
+ msg += " [end]"
+ else:
+ for b, box in enumerate(boxes):
+ if position == box:
+ msg += f" [boxes {b}]"
+ path[rank] = msg
+ print(path)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- escape_the_maze(_PARAMS.value, _OUTPUT_PROTO.value)
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ escape_the_maze(_PARAMS.value, _OUTPUT_PROTO.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
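The key modeling trick above is turning add_circuit into a Hamiltonian-path constraint by adding a mandatory end-to-start arc. A toy sketch of the same trick on a 3-node graph (node numbers and names are illustrative only):

```python
from ortools.sat.python import cp_model

model = cp_model.CpModel()
literals = {}
arcs = []
for tail in range(3):
    for head in range(3):
        if tail == head or (tail, head) == (2, 0):
            continue
        lit = model.new_bool_var(f"arc_{tail}_{head}")
        literals[(tail, head)] = lit
        arcs.append((tail, head, lit))
# Mandatory closing arc: the path must end at node 2 and "wrap" back to node 0.
arcs.append((2, 0, True))
model.add_circuit(arcs)

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.OPTIMAL:
    print(sorted((t, h) for (t, h), lit in literals.items() if solver.value(lit)))
    # Expected: [(0, 1), (1, 2)], i.e. the path 0 -> 1 -> 2.
```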
diff --git a/examples/python/memory_layout_and_infeasibility_sat.py b/examples/python/memory_layout_and_infeasibility_sat.py
index 9956700c225..be51cd3f342 100644
--- a/examples/python/memory_layout_and_infeasibility_sat.py
+++ b/examples/python/memory_layout_and_infeasibility_sat.py
@@ -46,136 +46,138 @@
def solve_hard_model(output_proto: str, params: str) -> bool:
- """Solves the hard assignment model."""
- print("Solving the hard assignment model")
- model = cp_model.CpModel()
+ """Solves the hard assignment model."""
+ print("Solving the hard assignment model")
+ model = cp_model.CpModel()
- x_intervals: List[cp_model.IntervalVar] = []
- y_starts: List[cp_model.IntVar] = []
- y_intervals: List[cp_model.IntervalVar] = []
+ x_intervals: List[cp_model.IntervalVar] = []
+ y_starts: List[cp_model.IntVar] = []
+ y_intervals: List[cp_model.IntervalVar] = []
- for start_time, end_time, demand, _ in DEMANDS:
- x_interval = model.new_fixed_size_interval_var(
- start_time, end_time - start_time + 1, ""
- )
- y_start = model.new_int_var(0, CAPACITY - demand, "")
- y_interval = model.new_fixed_size_interval_var(y_start, demand, "")
+ for start_time, end_time, demand, _ in DEMANDS:
+ x_interval = model.new_fixed_size_interval_var(
+ start_time, end_time - start_time + 1, ""
+ )
+ y_start = model.new_int_var(0, CAPACITY - demand, "")
+ y_interval = model.new_fixed_size_interval_var(y_start, demand, "")
- x_intervals.append(x_interval)
- y_starts.append(y_start)
- y_intervals.append(y_interval)
+ x_intervals.append(x_interval)
+ y_starts.append(y_start)
+ y_intervals.append(y_interval)
- model.add_no_overlap_2d(x_intervals, y_intervals)
+ model.add_no_overlap_2d(x_intervals, y_intervals)
- if output_proto:
- model.export_to_file(output_proto)
+ if output_proto:
+ model.export_to_file(output_proto)
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- status = solver.solve(model)
- print(solver.response_stats())
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ status = solver.solve(model)
+ print(solver.response_stats())
- if status in (cp_model.FEASIBLE, cp_model.OPTIMAL):
- for index, start_var in enumerate(y_starts):
- print(f"task {index} buffer starts at {solver.value(start_var)}")
+ if status in (cp_model.FEASIBLE, cp_model.OPTIMAL):
+ for index, start_var in enumerate(y_starts):
+ print(f"task {index} buffer starts at {solver.value(start_var)}")
- return status != cp_model.INFEASIBLE
+ return status != cp_model.INFEASIBLE
def solve_soft_model_with_assumptions() -> None:
- """Solves the soft model using assumptions."""
- print("Solving the soft model using assumptions")
-
- model = cp_model.CpModel()
-
- presences: List[cp_model.IntVar] = []
- x_intervals: List[cp_model.IntervalVar] = []
- y_starts: List[cp_model.IntVar] = []
- y_intervals: List[cp_model.IntervalVar] = []
-
- for start, end, demand, unused_alignment in DEMANDS:
- presence = model.new_bool_var("")
- x_interval = model.new_optional_fixed_size_interval_var(
- start, end - start + 1, presence, ""
- )
- y_start = model.new_int_var(0, CAPACITY - demand, "")
- y_interval = model.new_optional_fixed_size_interval_var(
- y_start, demand, presence, ""
- )
-
- presences.append(presence)
- x_intervals.append(x_interval)
- y_starts.append(y_start)
- y_intervals.append(y_interval)
-
- model.add_no_overlap_2d(x_intervals, y_intervals)
- model.add_assumptions(presences)
-
- solver = cp_model.CpSolver()
- status = solver.solve(model)
- print(solver.response_stats())
- if status == cp_model.INFEASIBLE:
- # The list actually contains the indices of the variables sufficient to
- # explain infeasibility.
- infeasible_variable_indices = solver.sufficient_assumptions_for_infeasibility()
- infeasible_variable_indices_set = set(infeasible_variable_indices)
-
- for index, presence in enumerate(presences):
- if presence.index in infeasible_variable_indices_set:
- print(f"using task {index} is sufficient to explain infeasibility")
+ """Solves the soft model using assumptions."""
+ print("Solving the soft model using assumptions")
+
+ model = cp_model.CpModel()
+
+ presences: List[cp_model.IntVar] = []
+ x_intervals: List[cp_model.IntervalVar] = []
+ y_starts: List[cp_model.IntVar] = []
+ y_intervals: List[cp_model.IntervalVar] = []
+
+ for start, end, demand, unused_alignment in DEMANDS:
+ presence = model.new_bool_var("")
+ x_interval = model.new_optional_fixed_size_interval_var(
+ start, end - start + 1, presence, ""
+ )
+ y_start = model.new_int_var(0, CAPACITY - demand, "")
+ y_interval = model.new_optional_fixed_size_interval_var(
+ y_start, demand, presence, ""
+ )
+
+ presences.append(presence)
+ x_intervals.append(x_interval)
+ y_starts.append(y_start)
+ y_intervals.append(y_interval)
+
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+ model.add_assumptions(presences)
+
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+ print(solver.response_stats())
+ if status == cp_model.INFEASIBLE:
+ # The list actually contains the indices of the variables sufficient to
+ # explain infeasibility.
+ infeasible_variable_indices = (
+ solver.sufficient_assumptions_for_infeasibility()
+ )
+ infeasible_variable_indices_set = set(infeasible_variable_indices)
+
+ for index, presence in enumerate(presences):
+ if presence.index in infeasible_variable_indices_set:
+ print(f"using task {index} is sufficient to explain infeasibility")
def solve_soft_model_with_maximization(params: str) -> None:
- """Solves the soft model using maximization."""
- print("Solving the soft model using minimization")
-
- model = cp_model.CpModel()
-
- presences: List[cp_model.IntVar] = []
- x_intervals: List[cp_model.IntervalVar] = []
- y_starts: List[cp_model.IntVar] = []
- y_intervals: List[cp_model.IntervalVar] = []
-
- for start, end, demand, unused_alignment in DEMANDS:
- presence = model.new_bool_var("")
- x_interval = model.new_optional_fixed_size_interval_var(
- start, end - start + 1, presence, ""
- )
- y_start = model.new_int_var(0, CAPACITY - demand, "")
- y_interval = model.new_optional_fixed_size_interval_var(
- y_start, demand, presence, ""
- )
-
- presences.append(presence)
- x_intervals.append(x_interval)
- y_starts.append(y_start)
- y_intervals.append(y_interval)
-
- model.add_no_overlap_2d(x_intervals, y_intervals)
-
- model.maximize(sum(presences))
-
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- status = solver.solve(model)
- print(solver.response_stats())
- if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
- for index, presence in enumerate(presences):
- if not solver.boolean_value(presence):
- print(f"task {index} does not fit")
- else:
- print(f"task {index} buffer starts at {solver.value(y_starts[index])}")
+ """Solves the soft model using maximization."""
+  print("Solving the soft model using maximization")
+
+ model = cp_model.CpModel()
+
+ presences: List[cp_model.IntVar] = []
+ x_intervals: List[cp_model.IntervalVar] = []
+ y_starts: List[cp_model.IntVar] = []
+ y_intervals: List[cp_model.IntervalVar] = []
+
+ for start, end, demand, unused_alignment in DEMANDS:
+ presence = model.new_bool_var("")
+ x_interval = model.new_optional_fixed_size_interval_var(
+ start, end - start + 1, presence, ""
+ )
+ y_start = model.new_int_var(0, CAPACITY - demand, "")
+ y_interval = model.new_optional_fixed_size_interval_var(
+ y_start, demand, presence, ""
+ )
+
+ presences.append(presence)
+ x_intervals.append(x_interval)
+ y_starts.append(y_start)
+ y_intervals.append(y_interval)
+
+ model.add_no_overlap_2d(x_intervals, y_intervals)
+
+ model.maximize(sum(presences))
+
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ status = solver.solve(model)
+ print(solver.response_stats())
+ if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
+ for index, presence in enumerate(presences):
+ if not solver.boolean_value(presence):
+ print(f"task {index} does not fit")
+ else:
+ print(f"task {index} buffer starts at {solver.value(y_starts[index])}")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- if not solve_hard_model(_OUTPUT_PROTO.value, _PARAMS.value):
- solve_soft_model_with_assumptions()
- solve_soft_model_with_maximization(_PARAMS.value)
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ if not solve_hard_model(_OUTPUT_PROTO.value, _PARAMS.value):
+ solve_soft_model_with_assumptions()
+ solve_soft_model_with_maximization(_PARAMS.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
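solve_soft_model_with_assumptions relies on solver assumptions to get an infeasibility explanation. A minimal sketch of that mechanism on a deliberately contradictory model (variable names are made up for illustration; only calls that appear in the example are used):

```python
from ortools.sat.python import cp_model

model = cp_model.CpModel()
a = model.new_bool_var("a")
b = model.new_bool_var("b")
model.add(a + b <= 1)          # a and b cannot both be true...
model.add_assumptions([a, b])  # ...but we assume both.

solver = cp_model.CpSolver()
if solver.solve(model) == cp_model.INFEASIBLE:
    core = set(solver.sufficient_assumptions_for_infeasibility())
    print([lit.name for lit in (a, b) if lit.index in core])
```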
diff --git a/examples/python/no_wait_baking_scheduling_sat.py b/examples/python/no_wait_baking_scheduling_sat.py
index 9c81d756743..80ef8ac6ff1 100644
--- a/examples/python/no_wait_baking_scheduling_sat.py
+++ b/examples/python/no_wait_baking_scheduling_sat.py
@@ -51,261 +51,259 @@
class Task:
- """A unit baking task.
+ """A unit baking task.
- - Simple baking tasks have a fixed duration. They are performed by workers.
- - Waiting/cooling/proofing tasks have a min and a max duration.
- They are performed by machine or they use space resources.
- """
+ - Simple baking tasks have a fixed duration. They are performed by workers.
+ - Waiting/cooling/proofing tasks have a min and a max duration.
+ They are performed by machine or they use space resources.
+ """
- def __init__(self, name, min_duration, max_duration):
- self.name = name
- self.min_duration = min_duration
- self.max_duration = max_duration
+ def __init__(self, name, min_duration, max_duration):
+ self.name = name
+ self.min_duration = min_duration
+ self.max_duration = max_duration
class Skill:
- """The skill of a worker or the capability of a machine."""
+ """The skill of a worker or the capability of a machine."""
- def __init__(self, name, efficiency):
- self.name = name
- # Efficiency is currently not used.
- self.efficiency = efficiency
+ def __init__(self, name, efficiency):
+ self.name = name
+ # Efficiency is currently not used.
+ self.efficiency = efficiency
class Recipe:
- """A recipe is a sequence of cooking tasks."""
+ """A recipe is a sequence of cooking tasks."""
- def __init__(self, name):
- self.name = name
- self.tasks = []
+ def __init__(self, name):
+ self.name = name
+ self.tasks = []
- def add_task(
- self, resource_name: str, min_duration: int, max_duration: int
- ) -> "Recipe":
- self.tasks.append(Task(resource_name, min_duration, max_duration))
- return self
+ def add_task(
+ self, resource_name: str, min_duration: int, max_duration: int
+ ) -> "Recipe":
+ self.tasks.append(Task(resource_name, min_duration, max_duration))
+ return self
class Resource:
- """A resource is a worker, a machine, or just some space for cakes to rest.
+ """A resource is a worker, a machine, or just some space for cakes to rest.
- - Workers have a capacity of 1 and can have variable efficiency.
- - Machines and spaces have a capacity greater or equal to one, but the
- efficiency is fixed to 100.
+ - Workers have a capacity of 1 and can have variable efficiency.
+  - Machines and spaces have a capacity greater than or equal to one, but
+    their efficiency is fixed at 100.
- For a worker with efficiency k and a task of duration t, the resulting
- work will have a duration `ceil(t * k)`.
- """
+ For a worker with efficiency k and a task of duration t, the resulting
+ work will have a duration `ceil(t * k)`.
+ """
- def __init__(self, name, capacity):
- self.name = name
- self.capacity = capacity
- self.skills = []
+ def __init__(self, name, capacity):
+ self.name = name
+ self.capacity = capacity
+ self.skills = []
- def add_skill(self, skill_name: str, efficiency: float) -> "Resource":
- self.skills.append(Skill(skill_name, efficiency))
- return self
+ def add_skill(self, skill_name: str, efficiency: float) -> "Resource":
+ self.skills.append(Skill(skill_name, efficiency))
+ return self
class Order:
- """An order is a recipe that should be delivered at a given due date."""
+ """An order is a recipe that should be delivered at a given due date."""
- def __init__(self, unique_id, recipe_name, due_date, quantity):
- """Builds an order.
+ def __init__(self, unique_id, recipe_name, due_date, quantity):
+ """Builds an order.
- Args:
- unique_id: A unique identifier for the order. Used to display the result.
- recipe_name: The name of the recipe. It must match one of the recipes.
- due_date: The due date in minutes since midnight.
- quantity: How many cakes to prepare.
- """
- self.unique_id = unique_id
- self.recipe_name = recipe_name
- self.due_date = due_date
- self.quantity = quantity
+ Args:
+ unique_id: A unique identifier for the order. Used to display the result.
+ recipe_name: The name of the recipe. It must match one of the recipes.
+ due_date: The due date in minutes since midnight.
+ quantity: How many cakes to prepare.
+ """
+ self.unique_id = unique_id
+ self.recipe_name = recipe_name
+ self.due_date = due_date
+ self.quantity = quantity
def set_up_data() -> Tuple[List[Recipe], List[Resource], List[Order]]:
- """Set up the bakery problem data."""
-
- # Recipes.
- croissant_recipe = Recipe(CROISSANT)
- croissant_recipe.add_task(BAKING, 15, 15)
- croissant_recipe.add_task(PROOFING, 60, 90)
- croissant_recipe.add_task(COOKING, 20, 20)
- croissant_recipe.add_task(DISPLAY, 5, 5 * 60)
-
- apple_pie_recipe = Recipe(APPLE_PIE)
- apple_pie_recipe.add_task(BAKING, 25, 25)
- apple_pie_recipe.add_task(PROOFING, 15, 60)
- apple_pie_recipe.add_task(COOKING, 30, 30)
- apple_pie_recipe.add_task(DECORATING, 5, 5)
- apple_pie_recipe.add_task(DISPLAY, 5, 5 * 60)
-
- brioche_recipe = Recipe(BRIOCHE)
- brioche_recipe.add_task(BAKING, 20, 20)
- brioche_recipe.add_task(PROOFING, 60, 90)
- brioche_recipe.add_task(COOKING, 30, 30)
- brioche_recipe.add_task(DISPLAY, 5, 5 * 60)
-
- chocolate_cake_recipe = Recipe(CHOCOLATE_CAKE)
- chocolate_cake_recipe.add_task(BAKING, 15, 15)
- chocolate_cake_recipe.add_task(COOKING, 25, 25)
- chocolate_cake_recipe.add_task(DECORATING, 15, 15)
- chocolate_cake_recipe.add_task(DISPLAY, 5, 5 * 60)
- recipes = [
- croissant_recipe,
- apple_pie_recipe,
- brioche_recipe,
- chocolate_cake_recipe,
- ]
-
- # Resources.
- baker1 = Resource("baker1", 1).add_skill(BAKING, 1.0)
- baker2 = Resource("baker2", 1).add_skill(BAKING, 1.0)
- decorator1 = Resource("decorator1", 1).add_skill(DECORATING, 1.0)
- waiting_space = Resource("waiting_space", 4).add_skill(PROOFING, 1.0)
- oven = Resource("oven", 4).add_skill(COOKING, 1.0)
- display_space = Resource("display_space", 12).add_skill(DISPLAY, 1.0)
- resources = [baker1, baker2, decorator1, waiting_space, oven, display_space]
-
- # Orders
- croissant_7am = Order("croissant_7am", CROISSANT, 7 * 60, 3)
- croissant_8am = Order("croissant_8am", CROISSANT, 8 * 60, 3)
- croissant_9am = Order("croissant_9am", CROISSANT, 9 * 60, 2)
- croissant_10am = Order("croissant_10am", CROISSANT, 10 * 60, 1)
- croissant_11am = Order("croissant_11am", CROISSANT, 11 * 60, 1)
- brioche_10am = Order("brioche_10am", BRIOCHE, 10 * 60, 8)
- brioche_12pm = Order("brioche_12pm", BRIOCHE, 12 * 60, 8)
- apple_pie_1pm = Order("apple_pie_1pm", APPLE_PIE, 13 * 60, 10)
- chocolate_4pm = Order("chocolate_4pm", CHOCOLATE_CAKE, 16 * 60, 10)
- orders = [
- croissant_7am,
- croissant_8am,
- croissant_9am,
- croissant_10am,
- croissant_11am,
- brioche_10am,
- brioche_12pm,
- apple_pie_1pm,
- chocolate_4pm,
- ]
-
- return recipes, resources, orders
+ """Set up the bakery problem data."""
+
+ # Recipes.
+ croissant_recipe = Recipe(CROISSANT)
+ croissant_recipe.add_task(BAKING, 15, 15)
+ croissant_recipe.add_task(PROOFING, 60, 90)
+ croissant_recipe.add_task(COOKING, 20, 20)
+ croissant_recipe.add_task(DISPLAY, 5, 5 * 60)
+
+ apple_pie_recipe = Recipe(APPLE_PIE)
+ apple_pie_recipe.add_task(BAKING, 25, 25)
+ apple_pie_recipe.add_task(PROOFING, 15, 60)
+ apple_pie_recipe.add_task(COOKING, 30, 30)
+ apple_pie_recipe.add_task(DECORATING, 5, 5)
+ apple_pie_recipe.add_task(DISPLAY, 5, 5 * 60)
+
+ brioche_recipe = Recipe(BRIOCHE)
+ brioche_recipe.add_task(BAKING, 20, 20)
+ brioche_recipe.add_task(PROOFING, 60, 90)
+ brioche_recipe.add_task(COOKING, 30, 30)
+ brioche_recipe.add_task(DISPLAY, 5, 5 * 60)
+
+ chocolate_cake_recipe = Recipe(CHOCOLATE_CAKE)
+ chocolate_cake_recipe.add_task(BAKING, 15, 15)
+ chocolate_cake_recipe.add_task(COOKING, 25, 25)
+ chocolate_cake_recipe.add_task(DECORATING, 15, 15)
+ chocolate_cake_recipe.add_task(DISPLAY, 5, 5 * 60)
+ recipes = [
+ croissant_recipe,
+ apple_pie_recipe,
+ brioche_recipe,
+ chocolate_cake_recipe,
+ ]
+
+ # Resources.
+ baker1 = Resource("baker1", 1).add_skill(BAKING, 1.0)
+ baker2 = Resource("baker2", 1).add_skill(BAKING, 1.0)
+ decorator1 = Resource("decorator1", 1).add_skill(DECORATING, 1.0)
+ waiting_space = Resource("waiting_space", 4).add_skill(PROOFING, 1.0)
+ oven = Resource("oven", 4).add_skill(COOKING, 1.0)
+ display_space = Resource("display_space", 12).add_skill(DISPLAY, 1.0)
+ resources = [baker1, baker2, decorator1, waiting_space, oven, display_space]
+
+ # Orders
+ croissant_7am = Order("croissant_7am", CROISSANT, 7 * 60, 3)
+ croissant_8am = Order("croissant_8am", CROISSANT, 8 * 60, 3)
+ croissant_9am = Order("croissant_9am", CROISSANT, 9 * 60, 2)
+ croissant_10am = Order("croissant_10am", CROISSANT, 10 * 60, 1)
+ croissant_11am = Order("croissant_11am", CROISSANT, 11 * 60, 1)
+ brioche_10am = Order("brioche_10am", BRIOCHE, 10 * 60, 8)
+ brioche_12pm = Order("brioche_12pm", BRIOCHE, 12 * 60, 8)
+ apple_pie_1pm = Order("apple_pie_1pm", APPLE_PIE, 13 * 60, 10)
+ chocolate_4pm = Order("chocolate_4pm", CHOCOLATE_CAKE, 16 * 60, 10)
+ orders = [
+ croissant_7am,
+ croissant_8am,
+ croissant_9am,
+ croissant_10am,
+ croissant_11am,
+ brioche_10am,
+ brioche_12pm,
+ apple_pie_1pm,
+ chocolate_4pm,
+ ]
+
+ return recipes, resources, orders
def solve_with_cp_sat(
recipes: List[Recipe], resources: List[Resource], orders: List[Order]
) -> None:
- """Build the optimization model, and solve the problem."""
-
- model = cp_model.CpModel()
- horizon = 22 * 60 # 10PM.
- start_work = 4 * 60 # 4am.
-
- # Parse recipes.
- recipe_by_name = {}
- for recipe in recipes:
- recipe_by_name[recipe.name] = recipe
-
- # Parse resources.
- resource_by_name = {}
- resource_list_by_skill_name = collections.defaultdict(list)
- for resource in resources:
- resource_by_name[resource.name] = resource
- for skill in resource.skills:
- resource_list_by_skill_name[skill.name].append(resource)
-
- # Parse orders and create one optional copy per eligible resource and per
- # task.
- interval_list_by_resource_name = collections.defaultdict(list)
- orders_sequence_of_events = collections.defaultdict(list)
- sorted_orders = []
- tardiness_vars = []
- for order in orders:
- for batch in range(order.quantity):
- order_id = f"{order.unique_id}_{batch}"
- sorted_orders.append(order_id)
- previous_end = None
- due_date = order.due_date
- recipe = recipe_by_name[order.recipe_name]
- for task in recipe.tasks:
- skill_name = task.name
- suffix = f"_{order.unique_id}_batch{batch}_{skill_name}"
-
- if previous_end is None:
- start = model.new_int_var(start_work, horizon, f"start{suffix}")
- orders_sequence_of_events[order_id].append(
- (start, f"start{suffix}")
- )
- else:
- start = previous_end
-
- size = model.new_int_var(
- task.min_duration, task.max_duration, f"size{suffix}"
- )
- if task == recipe.tasks[-1]:
- # The order must end after the due_date. Ideally, exactly at the
- # due_date.
- tardiness = model.new_int_var(0, horizon - due_date, f"end{suffix}")
- end = tardiness + due_date
-
- # Store the end_var for the objective.
- tardiness_vars.append(tardiness)
- else:
- end = model.new_int_var(start_work, horizon, f"end{suffix}")
- orders_sequence_of_events[order_id].append((end, f"end{suffix}"))
- previous_end = end
-
- # Per resource copy.
- presence_literals = []
- for resource in resource_list_by_skill_name[skill_name]:
- presence = model.new_bool_var(f"presence{suffix}_{resource.name}")
- copy = model.new_optional_interval_var(
- start, size, end, presence, f"interval{suffix}_{resource.name}"
- )
- interval_list_by_resource_name[resource.name].append(copy)
- presence_literals.append(presence)
-
- # Only one copy will be performed.
- model.add_exactly_one(presence_literals)
-
- # Create resource constraints.
- for resource in resources:
- intervals = interval_list_by_resource_name[resource.name]
- if resource.capacity == 1:
- model.add_no_overlap(intervals)
+ """Build the optimization model, and solve the problem."""
+
+ model = cp_model.CpModel()
+ horizon = 22 * 60 # 10PM.
+ start_work = 4 * 60 # 4am.
+
+ # Parse recipes.
+ recipe_by_name = {}
+ for recipe in recipes:
+ recipe_by_name[recipe.name] = recipe
+
+ # Parse resources.
+ resource_by_name = {}
+ resource_list_by_skill_name = collections.defaultdict(list)
+ for resource in resources:
+ resource_by_name[resource.name] = resource
+ for skill in resource.skills:
+ resource_list_by_skill_name[skill.name].append(resource)
+
+ # Parse orders and create one optional copy per eligible resource and per
+ # task.
+ interval_list_by_resource_name = collections.defaultdict(list)
+ orders_sequence_of_events = collections.defaultdict(list)
+ sorted_orders = []
+ tardiness_vars = []
+ for order in orders:
+ for batch in range(order.quantity):
+ order_id = f"{order.unique_id}_{batch}"
+ sorted_orders.append(order_id)
+ previous_end = None
+ due_date = order.due_date
+ recipe = recipe_by_name[order.recipe_name]
+ for task in recipe.tasks:
+ skill_name = task.name
+ suffix = f"_{order.unique_id}_batch{batch}_{skill_name}"
+
+ if previous_end is None:
+ start = model.new_int_var(start_work, horizon, f"start{suffix}")
+ orders_sequence_of_events[order_id].append((start, f"start{suffix}"))
else:
- model.add_cumulative(intervals, [1] * len(intervals), resource.capacity)
-
- # The objective is to minimize the sum of the tardiness values of each jobs.
- # The tardiness is difference between the end time of an order and its
- # due date.
- model.minimize(sum(tardiness_vars))
-
- # Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- solver.parameters.log_search_progress = True
- status = solver.solve(model)
-
- if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
- for order_id in sorted_orders:
- print(f"{order_id}:")
- for time_expr, event_id in orders_sequence_of_events[order_id]:
- time = solver.value(time_expr)
- print(f" {event_id} at {time // 60}:{time % 60:02}")
+ start = previous_end
+
+ size = model.new_int_var(
+ task.min_duration, task.max_duration, f"size{suffix}"
+ )
+ if task == recipe.tasks[-1]:
+ # The order must end after the due_date. Ideally, exactly at the
+ # due_date.
+ tardiness = model.new_int_var(0, horizon - due_date, f"end{suffix}")
+ end = tardiness + due_date
+
+ # Store the end_var for the objective.
+ tardiness_vars.append(tardiness)
+ else:
+ end = model.new_int_var(start_work, horizon, f"end{suffix}")
+ orders_sequence_of_events[order_id].append((end, f"end{suffix}"))
+ previous_end = end
+
+ # Per resource copy.
+ presence_literals = []
+ for resource in resource_list_by_skill_name[skill_name]:
+ presence = model.new_bool_var(f"presence{suffix}_{resource.name}")
+ copy = model.new_optional_interval_var(
+ start, size, end, presence, f"interval{suffix}_{resource.name}"
+ )
+ interval_list_by_resource_name[resource.name].append(copy)
+ presence_literals.append(presence)
+
+ # Only one copy will be performed.
+ model.add_exactly_one(presence_literals)
+
+ # Create resource constraints.
+ for resource in resources:
+ intervals = interval_list_by_resource_name[resource.name]
+ if resource.capacity == 1:
+ model.add_no_overlap(intervals)
+ else:
+ model.add_cumulative(intervals, [1] * len(intervals), resource.capacity)
+
+  # The objective is to minimize the sum of the tardiness values of each job.
+  # The tardiness is the difference between the end time of an order and its
+  # due date.
+ model.minimize(sum(tardiness_vars))
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ solver.parameters.log_search_progress = True
+ status = solver.solve(model)
+
+ if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
+ for order_id in sorted_orders:
+ print(f"{order_id}:")
+ for time_expr, event_id in orders_sequence_of_events[order_id]:
+ time = solver.value(time_expr)
+ print(f" {event_id} at {time // 60}:{time % 60:02}")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
- recipes, resources, orders = set_up_data()
- solve_with_cp_sat(recipes, resources, orders)
+ recipes, resources, orders = set_up_data()
+ solve_with_cp_sat(recipes, resources, orders)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
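The heart of the baking model above is the per-resource copy pattern: one optional interval per eligible resource, tied together by add_exactly_one. Reduced to a single task and two resources, a sketch looks like this (bounds and names are illustrative):

```python
from ortools.sat.python import cp_model

model = cp_model.CpModel()
start = model.new_int_var(0, 100, "start")
size = model.new_int_var(10, 20, "size")
end = model.new_int_var(0, 120, "end")

presences = []
for resource in ("baker1", "oven"):
    presence = model.new_bool_var(f"on_{resource}")
    # Optional copy of the task on this resource; only active if selected.
    model.new_optional_interval_var(start, size, end, presence, f"copy_{resource}")
    presences.append(presence)

# Exactly one copy is performed, so the task runs on exactly one resource.
model.add_exactly_one(presences)
```

In the full example, these per-resource copies are what feed add_no_overlap or add_cumulative, depending on the capacity of the resource.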
diff --git a/examples/python/nqueens_sat.py b/examples/python/nqueens_sat.py
index e2f29542a50..6669cdc358f 100644
--- a/examples/python/nqueens_sat.py
+++ b/examples/python/nqueens_sat.py
@@ -24,84 +24,84 @@
class NQueenSolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
-
- def __init__(self, queens: list[cp_model.IntVar]):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self._queens = queens
- self._solution_count = 0
- self._start_time = time.time()
-
- @property
- def solution_count(self) -> int:
- return self._solution_count
-
- def on_solution_callback(self) -> None:
- current_time = time.time()
- print(
- f"Solution{self._solution_count}, time ="
- f" {current_time - self._start_time} s"
- )
- self._solution_count += 1
-
- all_queens = range(len(self._queens))
- for i in all_queens:
- for j in all_queens:
- if self.value(self._queens[j]) == i:
- # There is a queen in column j, row i.
- print("Q", end=" ")
- else:
- print("_", end=" ")
- print()
- print()
+ """Print intermediate solutions."""
+
+ def __init__(self, queens: list[cp_model.IntVar]):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self._queens = queens
+ self._solution_count = 0
+ self._start_time = time.time()
+
+ @property
+ def solution_count(self) -> int:
+ return self._solution_count
+
+ def on_solution_callback(self) -> None:
+ current_time = time.time()
+ print(
+ f"Solution{self._solution_count}, time ="
+ f" {current_time - self._start_time} s"
+ )
+ self._solution_count += 1
+
+ all_queens = range(len(self._queens))
+ for i in all_queens:
+ for j in all_queens:
+ if self.value(self._queens[j]) == i:
+ # There is a queen in column j, row i.
+ print("Q", end=" ")
+ else:
+ print("_", end=" ")
+ print()
+ print()
def main(_):
- board_size = _SIZE.value
-
- ### Creates the solver.
- model = cp_model.CpModel()
-
- ### Creates the variables.
- # The array index is the column, and the value is the row.
- queens = [
- model.new_int_var(0, board_size - 1, "x%i" % i) for i in range(board_size)
- ]
-
- ### Creates the constraints.
-
- # All columns must be different because the indices of queens are all
- # different, so we just add the all different constraint on the rows.
- model.add_all_different(queens)
-
- # No two queens can be on the same diagonal.
- diag1 = []
- diag2 = []
- for i in range(board_size):
- q1 = model.new_int_var(0, 2 * board_size, "diag1_%i" % i)
- q2 = model.new_int_var(-board_size, board_size, "diag2_%i" % i)
- diag1.append(q1)
- diag2.append(q2)
- model.add(q1 == queens[i] + i)
- model.add(q2 == queens[i] - i)
- model.add_all_different(diag1)
- model.add_all_different(diag2)
-
- ### Solve model.
- solver = cp_model.CpSolver()
- solution_printer = NQueenSolutionPrinter(queens)
- # Enumerate all solutions.
- solver.parameters.enumerate_all_solutions = True
- # solve.
- solver.solve(model, solution_printer)
-
- print()
- print("Statistics")
- print(" - conflicts : %i" % solver.num_conflicts)
- print(" - branches : %i" % solver.num_branches)
- print(" - wall time : %f s" % solver.wall_time)
- print(" - solutions found : %i" % solution_printer.solution_count)
+ board_size = _SIZE.value
+
+ ### Creates the solver.
+ model = cp_model.CpModel()
+
+ ### Creates the variables.
+ # The array index is the column, and the value is the row.
+ queens = [
+ model.new_int_var(0, board_size - 1, "x%i" % i) for i in range(board_size)
+ ]
+
+ ### Creates the constraints.
+
+ # All columns must be different because the indices of queens are all
+ # different, so we just add the all different constraint on the rows.
+ model.add_all_different(queens)
+
+ # No two queens can be on the same diagonal.
+ diag1 = []
+ diag2 = []
+ for i in range(board_size):
+ q1 = model.new_int_var(0, 2 * board_size, "diag1_%i" % i)
+ q2 = model.new_int_var(-board_size, board_size, "diag2_%i" % i)
+ diag1.append(q1)
+ diag2.append(q2)
+ model.add(q1 == queens[i] + i)
+ model.add(q2 == queens[i] - i)
+ model.add_all_different(diag1)
+ model.add_all_different(diag2)
+
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ solution_printer = NQueenSolutionPrinter(queens)
+ # Enumerate all solutions.
+ solver.parameters.enumerate_all_solutions = True
+ # solve.
+ solver.solve(model, solution_printer)
+
+ print()
+ print("Statistics")
+ print(" - conflicts : %i" % solver.num_conflicts)
+ print(" - branches : %i" % solver.num_branches)
+ print(" - wall time : %f s" % solver.wall_time)
+ print(" - solutions found : %i" % solution_printer.solution_count)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
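The diagonal encoding above works because two queens share a diagonal exactly when their queens[i] + i or queens[i] - i values collide. A hypothetical stand-alone checker built on the same observation:

```python
def is_valid_placement(queens):
    # queens[i] is the row of the queen in column i.
    n = len(queens)
    return (
        len(set(queens)) == n
        and len({queens[i] + i for i in range(n)}) == n
        and len({queens[i] - i for i in range(n)}) == n
    )

print(is_valid_placement([1, 3, 0, 2]))  # True: a 4x4 solution
```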
diff --git a/examples/python/pell_equation_sat.py b/examples/python/pell_equation_sat.py
index 3583c5557d3..38697fcc647 100644
--- a/examples/python/pell_equation_sat.py
+++ b/examples/python/pell_equation_sat.py
@@ -26,41 +26,41 @@
def solve_pell(coeff: int, max_value: int) -> None:
- """Solves Pell's equation x^2 - coeff * y^2 = 1."""
- model = cp_model.CpModel()
+ """Solves Pell's equation x^2 - coeff * y^2 = 1."""
+ model = cp_model.CpModel()
- x = model.new_int_var(1, max_value, "x")
- y = model.new_int_var(1, max_value, "y")
+ x = model.new_int_var(1, max_value, "x")
+ y = model.new_int_var(1, max_value, "y")
- # Pell's equation:
- x_square = model.new_int_var(1, max_value * max_value, "x_square")
- y_square = model.new_int_var(1, max_value * max_value, "y_square")
- model.add_multiplication_equality(x_square, x, x)
- model.add_multiplication_equality(y_square, y, y)
- model.add(x_square - coeff * y_square == 1)
+ # Pell's equation:
+ x_square = model.new_int_var(1, max_value * max_value, "x_square")
+ y_square = model.new_int_var(1, max_value * max_value, "y_square")
+ model.add_multiplication_equality(x_square, x, x)
+ model.add_multiplication_equality(y_square, y, y)
+ model.add(x_square - coeff * y_square == 1)
- model.add_decision_strategy(
- [x, y], cp_model.CHOOSE_MIN_DOMAIN_SIZE, cp_model.SELECT_MIN_VALUE
- )
+ model.add_decision_strategy(
+ [x, y], cp_model.CHOOSE_MIN_DOMAIN_SIZE, cp_model.SELECT_MIN_VALUE
+ )
- solver = cp_model.CpSolver()
- solver.parameters.num_workers = 12
- solver.parameters.log_search_progress = True
- solver.parameters.cp_model_presolve = True
- solver.parameters.cp_model_probing_level = 0
+ solver = cp_model.CpSolver()
+ solver.parameters.num_workers = 12
+ solver.parameters.log_search_progress = True
+ solver.parameters.cp_model_presolve = True
+ solver.parameters.cp_model_probing_level = 0
- result = solver.solve(model)
- if result == cp_model.OPTIMAL:
- print(f"x={solver.value(x)} y={solver.value(y)} coeff={coeff}")
- if solver.value(x) ** 2 - coeff * (solver.value(y) ** 2) != 1:
- raise ValueError("Pell equation not satisfied.")
+ result = solver.solve(model)
+ if result == cp_model.OPTIMAL:
+ print(f"x={solver.value(x)} y={solver.value(y)} coeff={coeff}")
+ if solver.value(x) ** 2 - coeff * (solver.value(y) ** 2) != 1:
+ raise ValueError("Pell equation not satisfied.")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- solve_pell(_COEFF.value, _MAX_VALUE.value)
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ solve_pell(_COEFF.value, _MAX_VALUE.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
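For small coefficients, the CP-SAT answer above can be cross-checked by a direct scan for the fundamental solution of x^2 - coeff * y^2 = 1. A minimal sketch (the bound is illustrative; for some coefficients, such as 61, the fundamental solution is far too large for this approach):

```python
import math

def fundamental_pell_solution(coeff, max_y=1_000_000):
    # Scan y and test whether 1 + coeff * y^2 is a perfect square.
    for y in range(1, max_y + 1):
        x_squared = 1 + coeff * y * y
        x = math.isqrt(x_squared)
        if x * x == x_squared:
            return x, y
    return None

print(fundamental_pell_solution(2))  # (3, 2): 9 - 2 * 4 = 1
print(fundamental_pell_solution(3))  # (2, 1): 4 - 3 * 1 = 1
```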
diff --git a/examples/python/pentominoes_sat.py b/examples/python/pentominoes_sat.py
index 01479ec6ac0..36c0af65ef3 100644
--- a/examples/python/pentominoes_sat.py
+++ b/examples/python/pentominoes_sat.py
@@ -49,158 +49,158 @@
def is_one(mask: List[List[int]], x: int, y: int, orientation: int) -> bool:
- """Returns true if the oriented piece is 1 at position [i][j].
-
- The 3 bits in orientation respectively mean: transposition, symmetry by
- x axis, symmetry by y axis.
-
- Args:
- mask: The shape of the piece.
- x: position.
- y: position.
- orientation: between 0 and 7.
- """
- if orientation & 1:
- tmp: int = x
- x = y
- y = tmp
- if orientation & 2:
- x = len(mask[0]) - 1 - x
- if orientation & 4:
- y = len(mask) - 1 - y
- return mask[y][x] == 1
+  """Returns true if the oriented piece is 1 at position (x, y).
+
+ The 3 bits in orientation respectively mean: transposition, symmetry by
+ x axis, symmetry by y axis.
+
+ Args:
+ mask: The shape of the piece.
+ x: position.
+ y: position.
+ orientation: between 0 and 7.
+ """
+ if orientation & 1:
+ tmp: int = x
+ x = y
+ y = tmp
+ if orientation & 2:
+ x = len(mask[0]) - 1 - x
+ if orientation & 4:
+ y = len(mask) - 1 - y
+ return mask[y][x] == 1
def get_height(mask: List[List[int]], orientation: int) -> int:
- if orientation & 1:
- return len(mask[0])
- return len(mask)
+ if orientation & 1:
+ return len(mask[0])
+ return len(mask)
def get_width(mask: List[List[int]], orientation: int) -> int:
- if orientation & 1:
- return len(mask)
- return len(mask[0])
+ if orientation & 1:
+ return len(mask)
+ return len(mask[0])
def orientation_is_redundant(mask: List[List[int]], orientation: int) -> bool:
- """Checks if the current rotated figure is the same as a previous rotation."""
- size_i: int = get_width(mask, orientation)
- size_j: int = get_height(mask, orientation)
- for o in range(orientation):
- if size_i != get_width(mask, o):
- continue
- if size_j != get_height(mask, o):
- continue
-
- is_the_same: bool = True
- for k in range(size_i):
- if not is_the_same:
- break
- for l in range(size_j):
- if not is_the_same:
- break
- if is_one(mask, k, l, orientation) != is_one(mask, k, l, o):
- is_the_same = False
- if is_the_same:
- return True
- return False
+ """Checks if the current rotated figure is the same as a previous rotation."""
+ size_i: int = get_width(mask, orientation)
+ size_j: int = get_height(mask, orientation)
+ for o in range(orientation):
+ if size_i != get_width(mask, o):
+ continue
+ if size_j != get_height(mask, o):
+ continue
+
+ is_the_same: bool = True
+ for k in range(size_i):
+ if not is_the_same:
+ break
+ for l in range(size_j):
+ if not is_the_same:
+ break
+ if is_one(mask, k, l, orientation) != is_one(mask, k, l, o):
+ is_the_same = False
+ if is_the_same:
+ return True
+ return False
def generate_and_solve_problem(pieces: Dict[str, List[List[int]]]) -> None:
- """Solves the pentominoes problem."""
- box_height = _HEIGHT.value
- box_width = 5 * len(pieces) // box_height
- print(f"Box has dimension {box_height} * {box_width}")
-
- model = cp_model.CpModel()
- position_to_variables: List[List[List[cp_model.IntVar]]] = [
- [[] for _ in range(box_width)] for _ in range(box_height)
- ]
-
- for name, mask in pieces.items():
- all_position_variables = []
- for orientation in range(8):
- if orientation_is_redundant(mask, orientation):
- continue
- piece_width = get_width(mask, orientation)
- piece_height = get_height(mask, orientation)
- for i in range(box_width - piece_width + 1):
- for j in range(box_height - piece_height + 1):
- v = model.new_bool_var(name)
- all_position_variables.append(v)
- for k in range(piece_width):
- for l in range(piece_height):
- if is_one(mask, k, l, orientation):
- position_to_variables[j + l][i + k].append(v)
-
- # Only one combination is selected.
- model.add_exactly_one(all_position_variables)
-
- for one_column in position_to_variables:
- for all_pieces_in_one_position in one_column:
- model.add_exactly_one(all_pieces_in_one_position)
-
- # Solve the model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- status = solver.solve(model)
+ """Solves the pentominoes problem."""
+ box_height = _HEIGHT.value
+ box_width = 5 * len(pieces) // box_height
+ print(f"Box has dimension {box_height} * {box_width}")
+
+ model = cp_model.CpModel()
+ position_to_variables: List[List[List[cp_model.IntVar]]] = [
+ [[] for _ in range(box_width)] for _ in range(box_height)
+ ]
+
+ for name, mask in pieces.items():
+ all_position_variables = []
+ for orientation in range(8):
+ if orientation_is_redundant(mask, orientation):
+ continue
+ piece_width = get_width(mask, orientation)
+ piece_height = get_height(mask, orientation)
+ for i in range(box_width - piece_width + 1):
+ for j in range(box_height - piece_height + 1):
+ v = model.new_bool_var(name)
+ all_position_variables.append(v)
+ for k in range(piece_width):
+ for l in range(piece_height):
+ if is_one(mask, k, l, orientation):
+ position_to_variables[j + l][i + k].append(v)
+
+ # Only one combination is selected.
+ model.add_exactly_one(all_position_variables)
+
+ for one_column in position_to_variables:
+ for all_pieces_in_one_position in one_column:
+ model.add_exactly_one(all_pieces_in_one_position)
+
+ # Solve the model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ status = solver.solve(model)
+
+ print(
+ f"Problem {_PIECES.value} box {box_height}*{box_width} solved in"
+ f" {solver.wall_time}s with status {solver.status_name(status)}"
+ )
+
+ # Print the solution.
+ if status == cp_model.OPTIMAL:
+ for y in range(box_height):
+ line = ""
+ for x in range(box_width):
+ for v in position_to_variables[y][x]:
+ if solver.BooleanValue(v):
+ line += v.name
+ break
+ print(line)
+
+def main(argv: Sequence[str]) -> None:
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+
+ # Pieces are stored in a matrix. mask[height][width]
+ pieces: Dict[str, List[List[int]]] = {
+ "F": [[0, 1, 1], [1, 1, 0], [0, 1, 0]],
+ "I": [[1, 1, 1, 1, 1]],
+ "L": [[1, 1, 1, 1], [1, 0, 0, 0]],
+ "N": [[1, 1, 1, 0], [0, 0, 1, 1]],
+ "P": [[1, 1, 1], [1, 1, 0]],
+ "T": [[1, 1, 1], [0, 1, 0], [0, 1, 0]],
+ "U": [[1, 0, 1], [1, 1, 1]],
+ "V": [[1, 0, 0], [1, 0, 0], [1, 1, 1]],
+ "W": [[1, 0, 0], [1, 1, 0], [0, 1, 1]],
+ "X": [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
+ "Y": [[1, 1, 1, 1], [0, 1, 0, 0]],
+ "Z": [[1, 1, 0], [0, 1, 0], [0, 1, 1]],
+ }
+ selected_pieces: Dict[str, List[List[int]]] = {}
+ for p in _PIECES.value:
+ if p not in pieces:
+ print(f"Piece {p} not found in the list of pieces")
+ return
+ selected_pieces[p] = pieces[p]
+ if (len(selected_pieces) * 5) % _HEIGHT.value != 0:
print(
- f"Problem {_PIECES.value} box {box_height}*{box_width} solved in"
- f" {solver.wall_time}s with status {solver.status_name(status)}"
+ f"The height {_HEIGHT.value} does not divide the total area"
+ f" {5 * len(selected_pieces)}"
)
+ return
+ if _HEIGHT.value < 3 or 5 * len(selected_pieces) // _HEIGHT.value < 3:
+ print(f"The height {_HEIGHT.value} is not compatible with the pieces.")
+ return
- # Print the solution.
- if status == cp_model.OPTIMAL:
- for y in range(box_height):
- line = ""
- for x in range(box_width):
- for v in position_to_variables[y][x]:
- if solver.BooleanValue(v):
- line += v.name
- break
- print(line)
-
-
-def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
-
- # Pieces are stored in a matrix. mask[height][width]
- pieces: Dict[str, List[List[int]]] = {
- "F": [[0, 1, 1], [1, 1, 0], [0, 1, 0]],
- "I": [[1, 1, 1, 1, 1]],
- "L": [[1, 1, 1, 1], [1, 0, 0, 0]],
- "N": [[1, 1, 1, 0], [0, 0, 1, 1]],
- "P": [[1, 1, 1], [1, 1, 0]],
- "T": [[1, 1, 1], [0, 1, 0], [0, 1, 0]],
- "U": [[1, 0, 1], [1, 1, 1]],
- "V": [[1, 0, 0], [1, 0, 0], [1, 1, 1]],
- "W": [[1, 0, 0], [1, 1, 0], [0, 1, 1]],
- "X": [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
- "Y": [[1, 1, 1, 1], [0, 1, 0, 0]],
- "Z": [[1, 1, 0], [0, 1, 0], [0, 1, 1]],
- }
- selected_pieces: Dict[str, List[List[int]]] = {}
- for p in _PIECES.value:
- if p not in pieces:
- print(f"Piece {p} not found in the list of pieces")
- return
- selected_pieces[p] = pieces[p]
- if (len(selected_pieces) * 5) % _HEIGHT.value != 0:
- print(
- f"The height {_HEIGHT.value} does not divide the total area"
- f" {5 * len(selected_pieces)}"
- )
- return
- if _HEIGHT.value < 3 or 5 * len(selected_pieces) // _HEIGHT.value < 3:
- print(f"The height {_HEIGHT.value} is not compatible with the pieces.")
- return
-
- generate_and_solve_problem(selected_pieces)
+ generate_and_solve_problem(selected_pieces)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
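The pentominoes model above is an exact-cover formulation: one Boolean variable per candidate placement, exactly one placement per piece, and exactly one covering placement per board cell. A minimal sketch of that structure on a toy 2x2 board with two hypothetical 1x2 pieces, assuming the same snake_case CP-SAT Python API used in the file:

    from ortools.sat.python import cp_model

    model = cp_model.CpModel()
    # Candidate placements per piece: placement id -> covered (row, col) cells.
    placements = {
        "A": {0: [(0, 0), (0, 1)], 1: [(1, 0), (1, 1)]},
        "B": {0: [(0, 0), (0, 1)], 1: [(1, 0), (1, 1)]},
    }
    cover = {(r, c): [] for r in range(2) for c in range(2)}
    for piece, options in placements.items():
        choices = []
        for placement_id, cells in options.items():
            v = model.new_bool_var(f"{piece}_{placement_id}")
            choices.append(v)
            for cell in cells:
                cover[cell].append(v)
        model.add_exactly_one(choices)  # each piece gets exactly one placement
    for cell_vars in cover.values():
        model.add_exactly_one(cell_vars)  # each cell is covered exactly once
    solver = cp_model.CpSolver()
    print(solver.status_name(solver.solve(model)))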
diff --git a/examples/python/prize_collecting_tsp.py b/examples/python/prize_collecting_tsp.py
index a288498465d..7099fd3caa6 100755
--- a/examples/python/prize_collecting_tsp.py
+++ b/examples/python/prize_collecting_tsp.py
@@ -13,51 +13,1691 @@
# limitations under the License.
"""Simple prize collecting TSP problem with a max distance."""
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
+from ortools.routing import enums_pb2
+from ortools.routing import pywraprouting
DISTANCE_MATRIX = [
- [0, 10938, 4542, 2835, 29441, 2171, 1611, 9208, 9528, 11111, 16120, 22606, 22127, 20627, 21246, 23387, 16697, 33609, 26184, 24772, 22644, 20655, 30492, 23296, 32979, 18141, 19248, 17129, 17192, 15645, 12658, 11210, 12094, 13175, 18162, 4968, 12308, 10084, 13026, 15056],
- [10938, 0, 6422, 9742, 18988, 12974, 11216, 19715, 19004, 18271, 25070, 31971, 31632, 30571, 31578, 33841, 27315, 43964, 36944, 35689, 33569, 31481, 41360, 33760, 43631, 28730, 29976, 27803, 28076, 26408, 23504, 22025, 22000, 13197, 14936, 15146, 23246, 20956, 23963, 25994],
- [4542, 6422, 0, 3644, 25173, 6552, 5092, 13584, 13372, 13766, 19805, 26537, 26117, 24804, 25590, 27784, 21148, 37981, 30693, 29315, 27148, 25071, 34943, 27472, 37281, 22389, 23592, 21433, 21655, 20011, 17087, 15612, 15872, 11653, 15666, 8842, 16843, 14618, 17563, 19589],
- [2835, 9742, 3644, 0, 28681, 3851, 4341, 11660, 12294, 13912, 18893, 25283, 24777, 23173, 23636, 25696, 18950, 35927, 28233, 26543, 24127, 21864, 31765, 24018, 33904, 19005, 20295, 18105, 18551, 16763, 13958, 12459, 12296, 10370, 15331, 5430, 14044, 12135, 14771, 16743],
- [29441, 18988, 25173, 28681, 0, 31590, 29265, 37173, 35501, 32929, 40239, 47006, 46892, 46542, 48112, 50506, 44539, 60103, 54208, 53557, 51878, 50074, 59849, 52645, 62415, 47544, 48689, 46560, 46567, 45086, 42083, 40648, 40971, 29929, 28493, 34015, 41473, 38935, 42160, 44198],
- [2171, 12974, 6552, 3851, 31590, 0, 3046, 7856, 8864, 11330, 15411, 21597, 21065, 19382, 19791, 21845, 15099, 32076, 24425, 22848, 20600, 18537, 28396, 21125, 30825, 15975, 17101, 14971, 15104, 13503, 10544, 9080, 9983, 13435, 18755, 2947, 10344, 8306, 11069, 13078],
- [1611, 11216, 5092, 4341, 29265, 3046, 0, 8526, 8368, 9573, 14904, 21529, 21085, 19719, 20504, 22713, 16118, 32898, 25728, 24541, 22631, 20839, 30584, 23755, 33278, 18557, 19545, 17490, 17309, 15936, 12881, 11498, 12944, 14711, 19589, 5993, 12227, 9793, 12925, 14967],
- [9208, 19715, 13584, 11660, 37173, 7856, 8526, 0, 3248, 7855, 8245, 13843, 13272, 11526, 12038, 14201, 7599, 24411, 17259, 16387, 15050, 13999, 23134, 17899, 26460, 12894, 13251, 11680, 10455, 9997, 7194, 6574, 10678, 20959, 26458, 8180, 5255, 2615, 5730, 7552],
- [9528, 19004, 13372, 12294, 35501, 8864, 8368, 3248, 0, 4626, 6598, 13168, 12746, 11567, 12731, 15083, 9120, 25037, 18718, 18433, 17590, 16888, 25630, 20976, 29208, 16055, 16300, 14838, 13422, 13165, 10430, 9813, 13777, 22300, 27564, 10126, 8388, 5850, 8778, 10422],
- [11111, 18271, 13766, 13912, 32929, 11330, 9573, 7855, 4626, 0, 7318, 14185, 14005, 13655, 15438, 17849, 12839, 27179, 21947, 22230, 21814, 21366, 29754, 25555, 33535, 20674, 20872, 19457, 17961, 17787, 15048, 14372, 18115, 24280, 29101, 13400, 13008, 10467, 13375, 14935],
- [16120, 25070, 19805, 18893, 40239, 15411, 14904, 8245, 6598, 7318, 0, 6939, 6702, 6498, 8610, 10961, 7744, 19889, 15350, 16403, 16975, 17517, 24357, 22176, 28627, 18093, 17672, 16955, 14735, 15510, 13694, 13768, 18317, 28831, 34148, 16326, 11276, 9918, 11235, 11891],
- [22606, 31971, 26537, 25283, 47006, 21597, 21529, 13843, 13168, 14185, 6939, 0, 793, 3401, 5562, 6839, 8923, 13433, 11264, 13775, 15853, 17629, 21684, 22315, 26411, 19539, 18517, 18636, 16024, 17632, 16948, 17587, 22131, 34799, 40296, 21953, 14739, 14568, 14366, 14002],
- [22127, 31632, 26117, 24777, 46892, 21065, 21085, 13272, 12746, 14005, 6702, 793, 0, 2608, 4809, 6215, 8151, 13376, 10702, 13094, 15099, 16845, 21039, 21535, 25744, 18746, 17725, 17845, 15232, 16848, 16197, 16859, 21391, 34211, 39731, 21345, 14006, 13907, 13621, 13225],
- [20627, 30571, 24804, 23173, 46542, 19382, 19719, 11526, 11567, 13655, 6498, 3401, 2608, 0, 2556, 4611, 5630, 13586, 9157, 11005, 12681, 14285, 19044, 18996, 23644, 16138, 15126, 15240, 12625, 14264, 13736, 14482, 18958, 32292, 37879, 19391, 11621, 11803, 11188, 10671],
- [21246, 31578, 25590, 23636, 48112, 19791, 20504, 12038, 12731, 15438, 8610, 5562, 4809, 2556, 0, 2411, 4917, 12395, 6757, 8451, 10292, 12158, 16488, 16799, 21097, 14374, 13194, 13590, 10943, 12824, 12815, 13779, 18042, 32259, 37918, 19416, 10975, 11750, 10424, 9475],
- [23387, 33841, 27784, 25696, 50506, 21845, 22713, 14201, 15083, 17849, 10961, 6839, 6215, 4611, 2411, 0, 6760, 10232, 4567, 7010, 9607, 12003, 14846, 16408, 19592, 14727, 13336, 14109, 11507, 13611, 14104, 15222, 19237, 34013, 39703, 21271, 12528, 13657, 11907, 10633],
- [16697, 27315, 21148, 18950, 44539, 15099, 16118, 7599, 9120, 12839, 7744, 8923, 8151, 5630, 4917, 6760, 0, 16982, 9699, 9400, 9302, 9823, 16998, 14534, 21042, 10911, 10190, 9900, 7397, 8758, 8119, 8948, 13353, 27354, 33023, 14542, 6106, 6901, 5609, 5084],
- [33609, 43964, 37981, 35927, 60103, 32076, 32898, 24411, 25037, 27179, 19889, 13433, 13376, 13586, 12395, 10232, 16982, 0, 8843, 12398, 16193, 19383, 16423, 22583, 20997, 22888, 21194, 22640, 20334, 22636, 23801, 25065, 28675, 44048, 49756, 31426, 22528, 23862, 21861, 20315],
- [26184, 36944, 30693, 28233, 54208, 24425, 25728, 17259, 18718, 21947, 15350, 11264, 10702, 9157, 6757, 4567, 9699, 8843, 0, 3842, 7518, 10616, 10666, 14237, 15515, 14053, 12378, 13798, 11537, 13852, 15276, 16632, 19957, 35660, 41373, 23361, 14333, 16125, 13624, 11866],
- [24772, 35689, 29315, 26543, 53557, 22848, 24541, 16387, 18433, 22230, 16403, 13775, 13094, 11005, 8451, 7010, 9400, 12398, 3842, 0, 3795, 7014, 8053, 10398, 12657, 10633, 8889, 10569, 8646, 10938, 12906, 14366, 17106, 33171, 38858, 21390, 12507, 14748, 11781, 9802],
- [22644, 33569, 27148, 24127, 51878, 20600, 22631, 15050, 17590, 21814, 16975, 15853, 15099, 12681, 10292, 9607, 9302, 16193, 7518, 3795, 0, 3250, 8084, 6873, 11763, 6949, 5177, 7050, 5619, 7730, 10187, 11689, 13792, 30012, 35654, 18799, 10406, 12981, 9718, 7682],
- [20655, 31481, 25071, 21864, 50074, 18537, 20839, 13999, 16888, 21366, 17517, 17629, 16845, 14285, 12158, 12003, 9823, 19383, 10616, 7014, 3250, 0, 9901, 4746, 12531, 3737, 1961, 4036, 3588, 5109, 7996, 9459, 10846, 27094, 32690, 16451, 8887, 11624, 8304, 6471],
- [30492, 41360, 34943, 31765, 59849, 28396, 30584, 23134, 25630, 29754, 24357, 21684, 21039, 19044, 16488, 14846, 16998, 16423, 10666, 8053, 8084, 9901, 0, 9363, 4870, 13117, 11575, 13793, 13300, 15009, 17856, 19337, 20454, 36551, 42017, 26352, 18403, 21033, 17737, 15720],
- [23296, 33760, 27472, 24018, 52645, 21125, 23755, 17899, 20976, 25555, 22176, 22315, 21535, 18996, 16799, 16408, 14534, 22583, 14237, 10398, 6873, 4746, 9363, 0, 10020, 5211, 4685, 6348, 7636, 8010, 11074, 12315, 11926, 27537, 32880, 18634, 12644, 15358, 12200, 10674],
- [32979, 43631, 37281, 33904, 62415, 30825, 33278, 26460, 29208, 33535, 28627, 26411, 25744, 23644, 21097, 19592, 21042, 20997, 15515, 12657, 11763, 12531, 4870, 10020, 0, 14901, 13738, 15855, 16118, 17348, 20397, 21793, 21936, 37429, 42654, 28485, 21414, 24144, 20816, 18908],
- [18141, 28730, 22389, 19005, 47544, 15975, 18557, 12894, 16055, 20674, 18093, 19539, 18746, 16138, 14374, 14727, 10911, 22888, 14053, 10633, 6949, 3737, 13117, 5211, 14901, 0, 1777, 1217, 3528, 2896, 5892, 7104, 7338, 23517, 29068, 13583, 7667, 10304, 7330, 6204],
- [19248, 29976, 23592, 20295, 48689, 17101, 19545, 13251, 16300, 20872, 17672, 18517, 17725, 15126, 13194, 13336, 10190, 21194, 12378, 8889, 5177, 1961, 11575, 4685, 13738, 1777, 0, 2217, 2976, 3610, 6675, 8055, 8965, 25197, 30774, 14865, 8007, 10742, 7532, 6000],
- [17129, 27803, 21433, 18105, 46560, 14971, 17490, 11680, 14838, 19457, 16955, 18636, 17845, 15240, 13590, 14109, 9900, 22640, 13798, 10569, 7050, 4036, 13793, 6348, 15855, 1217, 2217, 0, 2647, 1686, 4726, 6000, 6810, 23060, 28665, 12674, 6450, 9094, 6117, 5066],
- [17192, 28076, 21655, 18551, 46567, 15104, 17309, 10455, 13422, 17961, 14735, 16024, 15232, 12625, 10943, 11507, 7397, 20334, 11537, 8646, 5619, 3588, 13300, 7636, 16118, 3528, 2976, 2647, 0, 2320, 4593, 6093, 8479, 24542, 30219, 13194, 5301, 8042, 4735, 3039],
- [15645, 26408, 20011, 16763, 45086, 13503, 15936, 9997, 13165, 17787, 15510, 17632, 16848, 14264, 12824, 13611, 8758, 22636, 13852, 10938, 7730, 5109, 15009, 8010, 17348, 2896, 3610, 1686, 2320, 0, 3086, 4444, 6169, 22301, 27963, 11344, 4780, 7408, 4488, 3721],
- [12658, 23504, 17087, 13958, 42083, 10544, 12881, 7194, 10430, 15048, 13694, 16948, 16197, 13736, 12815, 14104, 8119, 23801, 15276, 12906, 10187, 7996, 17856, 11074, 20397, 5892, 6675, 4726, 4593, 3086, 0, 1501, 5239, 20390, 26101, 8611, 2418, 4580, 2599, 3496],
- [11210, 22025, 15612, 12459, 40648, 9080, 11498, 6574, 9813, 14372, 13768, 17587, 16859, 14482, 13779, 15222, 8948, 25065, 16632, 14366, 11689, 9459, 19337, 12315, 21793, 7104, 8055, 6000, 6093, 4444, 1501, 0, 4608, 19032, 24747, 7110, 2860, 4072, 3355, 4772],
- [12094, 22000, 15872, 12296, 40971, 9983, 12944, 10678, 13777, 18115, 18317, 22131, 21391, 18958, 18042, 19237, 13353, 28675, 19957, 17106, 13792, 10846, 20454, 11926, 21936, 7338, 8965, 6810, 8479, 6169, 5239, 4608, 0, 16249, 21866, 7146, 7403, 8446, 7773, 8614],
- [13175, 13197, 11653, 10370, 29929, 13435, 14711, 20959, 22300, 24280, 28831, 34799, 34211, 32292, 32259, 34013, 27354, 44048, 35660, 33171, 30012, 27094, 36551, 27537, 37429, 23517, 25197, 23060, 24542, 22301, 20390, 19032, 16249, 0, 5714, 12901, 21524, 20543, 22186, 23805],
- [18162, 14936, 15666, 15331, 28493, 18755, 19589, 26458, 27564, 29101, 34148, 40296, 39731, 37879, 37918, 39703, 33023, 49756, 41373, 38858, 35654, 32690, 42017, 32880, 42654, 29068, 30774, 28665, 30219, 27963, 26101, 24747, 21866, 5714, 0, 18516, 27229, 26181, 27895, 29519],
- [4968, 15146, 8842, 5430, 34015, 2947, 5993, 8180, 10126, 13400, 16326, 21953, 21345, 19391, 19416, 21271, 14542, 31426, 23361, 21390, 18799, 16451, 26352, 18634, 28485, 13583, 14865, 12674, 13194, 11344, 8611, 7110, 7146, 12901, 18516, 0, 9029, 7668, 9742, 11614],
- [12308, 23246, 16843, 14044, 41473, 10344, 12227, 5255, 8388, 13008, 11276, 14739, 14006, 11621, 10975, 12528, 6106, 22528, 14333, 12507, 10406, 8887, 18403, 12644, 21414, 7667, 8007, 6450, 5301, 4780, 2418, 2860, 7403, 21524, 27229, 9029, 0, 2747, 726, 2749],
- [10084, 20956, 14618, 12135, 38935, 8306, 9793, 2615, 5850, 10467, 9918, 14568, 13907, 11803, 11750, 13657, 6901, 23862, 16125, 14748, 12981, 11624, 21033, 15358, 24144, 10304, 10742, 9094, 8042, 7408, 4580, 4072, 8446, 20543, 26181, 7668, 2747, 0, 3330, 5313],
- [13026, 23963, 17563, 14771, 42160, 11069, 12925, 5730, 8778, 13375, 11235, 14366, 13621, 11188, 10424, 11907, 5609, 21861, 13624, 11781, 9718, 8304, 17737, 12200, 20816, 7330, 7532, 6117, 4735, 4488, 2599, 3355, 7773, 22186, 27895, 9742, 726, 3330, 0, 2042],
- [15056, 25994, 19589, 16743, 44198, 13078, 14967, 7552, 10422, 14935, 11891, 14002, 13225, 10671, 9475, 10633, 5084, 20315, 11866, 9802, 7682, 6471, 15720, 10674, 18908, 6204, 6000, 5066, 3039, 3721, 3496, 4772, 8614, 23805, 29519, 11614, 2749, 5313, 2042, 0],
-] # yapf: disable
+ [
+ 0,
+ 10938,
+ 4542,
+ 2835,
+ 29441,
+ 2171,
+ 1611,
+ 9208,
+ 9528,
+ 11111,
+ 16120,
+ 22606,
+ 22127,
+ 20627,
+ 21246,
+ 23387,
+ 16697,
+ 33609,
+ 26184,
+ 24772,
+ 22644,
+ 20655,
+ 30492,
+ 23296,
+ 32979,
+ 18141,
+ 19248,
+ 17129,
+ 17192,
+ 15645,
+ 12658,
+ 11210,
+ 12094,
+ 13175,
+ 18162,
+ 4968,
+ 12308,
+ 10084,
+ 13026,
+ 15056,
+ ],
+ [
+ 10938,
+ 0,
+ 6422,
+ 9742,
+ 18988,
+ 12974,
+ 11216,
+ 19715,
+ 19004,
+ 18271,
+ 25070,
+ 31971,
+ 31632,
+ 30571,
+ 31578,
+ 33841,
+ 27315,
+ 43964,
+ 36944,
+ 35689,
+ 33569,
+ 31481,
+ 41360,
+ 33760,
+ 43631,
+ 28730,
+ 29976,
+ 27803,
+ 28076,
+ 26408,
+ 23504,
+ 22025,
+ 22000,
+ 13197,
+ 14936,
+ 15146,
+ 23246,
+ 20956,
+ 23963,
+ 25994,
+ ],
+ [
+ 4542,
+ 6422,
+ 0,
+ 3644,
+ 25173,
+ 6552,
+ 5092,
+ 13584,
+ 13372,
+ 13766,
+ 19805,
+ 26537,
+ 26117,
+ 24804,
+ 25590,
+ 27784,
+ 21148,
+ 37981,
+ 30693,
+ 29315,
+ 27148,
+ 25071,
+ 34943,
+ 27472,
+ 37281,
+ 22389,
+ 23592,
+ 21433,
+ 21655,
+ 20011,
+ 17087,
+ 15612,
+ 15872,
+ 11653,
+ 15666,
+ 8842,
+ 16843,
+ 14618,
+ 17563,
+ 19589,
+ ],
+ [
+ 2835,
+ 9742,
+ 3644,
+ 0,
+ 28681,
+ 3851,
+ 4341,
+ 11660,
+ 12294,
+ 13912,
+ 18893,
+ 25283,
+ 24777,
+ 23173,
+ 23636,
+ 25696,
+ 18950,
+ 35927,
+ 28233,
+ 26543,
+ 24127,
+ 21864,
+ 31765,
+ 24018,
+ 33904,
+ 19005,
+ 20295,
+ 18105,
+ 18551,
+ 16763,
+ 13958,
+ 12459,
+ 12296,
+ 10370,
+ 15331,
+ 5430,
+ 14044,
+ 12135,
+ 14771,
+ 16743,
+ ],
+ [
+ 29441,
+ 18988,
+ 25173,
+ 28681,
+ 0,
+ 31590,
+ 29265,
+ 37173,
+ 35501,
+ 32929,
+ 40239,
+ 47006,
+ 46892,
+ 46542,
+ 48112,
+ 50506,
+ 44539,
+ 60103,
+ 54208,
+ 53557,
+ 51878,
+ 50074,
+ 59849,
+ 52645,
+ 62415,
+ 47544,
+ 48689,
+ 46560,
+ 46567,
+ 45086,
+ 42083,
+ 40648,
+ 40971,
+ 29929,
+ 28493,
+ 34015,
+ 41473,
+ 38935,
+ 42160,
+ 44198,
+ ],
+ [
+ 2171,
+ 12974,
+ 6552,
+ 3851,
+ 31590,
+ 0,
+ 3046,
+ 7856,
+ 8864,
+ 11330,
+ 15411,
+ 21597,
+ 21065,
+ 19382,
+ 19791,
+ 21845,
+ 15099,
+ 32076,
+ 24425,
+ 22848,
+ 20600,
+ 18537,
+ 28396,
+ 21125,
+ 30825,
+ 15975,
+ 17101,
+ 14971,
+ 15104,
+ 13503,
+ 10544,
+ 9080,
+ 9983,
+ 13435,
+ 18755,
+ 2947,
+ 10344,
+ 8306,
+ 11069,
+ 13078,
+ ],
+ [
+ 1611,
+ 11216,
+ 5092,
+ 4341,
+ 29265,
+ 3046,
+ 0,
+ 8526,
+ 8368,
+ 9573,
+ 14904,
+ 21529,
+ 21085,
+ 19719,
+ 20504,
+ 22713,
+ 16118,
+ 32898,
+ 25728,
+ 24541,
+ 22631,
+ 20839,
+ 30584,
+ 23755,
+ 33278,
+ 18557,
+ 19545,
+ 17490,
+ 17309,
+ 15936,
+ 12881,
+ 11498,
+ 12944,
+ 14711,
+ 19589,
+ 5993,
+ 12227,
+ 9793,
+ 12925,
+ 14967,
+ ],
+ [
+ 9208,
+ 19715,
+ 13584,
+ 11660,
+ 37173,
+ 7856,
+ 8526,
+ 0,
+ 3248,
+ 7855,
+ 8245,
+ 13843,
+ 13272,
+ 11526,
+ 12038,
+ 14201,
+ 7599,
+ 24411,
+ 17259,
+ 16387,
+ 15050,
+ 13999,
+ 23134,
+ 17899,
+ 26460,
+ 12894,
+ 13251,
+ 11680,
+ 10455,
+ 9997,
+ 7194,
+ 6574,
+ 10678,
+ 20959,
+ 26458,
+ 8180,
+ 5255,
+ 2615,
+ 5730,
+ 7552,
+ ],
+ [
+ 9528,
+ 19004,
+ 13372,
+ 12294,
+ 35501,
+ 8864,
+ 8368,
+ 3248,
+ 0,
+ 4626,
+ 6598,
+ 13168,
+ 12746,
+ 11567,
+ 12731,
+ 15083,
+ 9120,
+ 25037,
+ 18718,
+ 18433,
+ 17590,
+ 16888,
+ 25630,
+ 20976,
+ 29208,
+ 16055,
+ 16300,
+ 14838,
+ 13422,
+ 13165,
+ 10430,
+ 9813,
+ 13777,
+ 22300,
+ 27564,
+ 10126,
+ 8388,
+ 5850,
+ 8778,
+ 10422,
+ ],
+ [
+ 11111,
+ 18271,
+ 13766,
+ 13912,
+ 32929,
+ 11330,
+ 9573,
+ 7855,
+ 4626,
+ 0,
+ 7318,
+ 14185,
+ 14005,
+ 13655,
+ 15438,
+ 17849,
+ 12839,
+ 27179,
+ 21947,
+ 22230,
+ 21814,
+ 21366,
+ 29754,
+ 25555,
+ 33535,
+ 20674,
+ 20872,
+ 19457,
+ 17961,
+ 17787,
+ 15048,
+ 14372,
+ 18115,
+ 24280,
+ 29101,
+ 13400,
+ 13008,
+ 10467,
+ 13375,
+ 14935,
+ ],
+ [
+ 16120,
+ 25070,
+ 19805,
+ 18893,
+ 40239,
+ 15411,
+ 14904,
+ 8245,
+ 6598,
+ 7318,
+ 0,
+ 6939,
+ 6702,
+ 6498,
+ 8610,
+ 10961,
+ 7744,
+ 19889,
+ 15350,
+ 16403,
+ 16975,
+ 17517,
+ 24357,
+ 22176,
+ 28627,
+ 18093,
+ 17672,
+ 16955,
+ 14735,
+ 15510,
+ 13694,
+ 13768,
+ 18317,
+ 28831,
+ 34148,
+ 16326,
+ 11276,
+ 9918,
+ 11235,
+ 11891,
+ ],
+ [
+ 22606,
+ 31971,
+ 26537,
+ 25283,
+ 47006,
+ 21597,
+ 21529,
+ 13843,
+ 13168,
+ 14185,
+ 6939,
+ 0,
+ 793,
+ 3401,
+ 5562,
+ 6839,
+ 8923,
+ 13433,
+ 11264,
+ 13775,
+ 15853,
+ 17629,
+ 21684,
+ 22315,
+ 26411,
+ 19539,
+ 18517,
+ 18636,
+ 16024,
+ 17632,
+ 16948,
+ 17587,
+ 22131,
+ 34799,
+ 40296,
+ 21953,
+ 14739,
+ 14568,
+ 14366,
+ 14002,
+ ],
+ [
+ 22127,
+ 31632,
+ 26117,
+ 24777,
+ 46892,
+ 21065,
+ 21085,
+ 13272,
+ 12746,
+ 14005,
+ 6702,
+ 793,
+ 0,
+ 2608,
+ 4809,
+ 6215,
+ 8151,
+ 13376,
+ 10702,
+ 13094,
+ 15099,
+ 16845,
+ 21039,
+ 21535,
+ 25744,
+ 18746,
+ 17725,
+ 17845,
+ 15232,
+ 16848,
+ 16197,
+ 16859,
+ 21391,
+ 34211,
+ 39731,
+ 21345,
+ 14006,
+ 13907,
+ 13621,
+ 13225,
+ ],
+ [
+ 20627,
+ 30571,
+ 24804,
+ 23173,
+ 46542,
+ 19382,
+ 19719,
+ 11526,
+ 11567,
+ 13655,
+ 6498,
+ 3401,
+ 2608,
+ 0,
+ 2556,
+ 4611,
+ 5630,
+ 13586,
+ 9157,
+ 11005,
+ 12681,
+ 14285,
+ 19044,
+ 18996,
+ 23644,
+ 16138,
+ 15126,
+ 15240,
+ 12625,
+ 14264,
+ 13736,
+ 14482,
+ 18958,
+ 32292,
+ 37879,
+ 19391,
+ 11621,
+ 11803,
+ 11188,
+ 10671,
+ ],
+ [
+ 21246,
+ 31578,
+ 25590,
+ 23636,
+ 48112,
+ 19791,
+ 20504,
+ 12038,
+ 12731,
+ 15438,
+ 8610,
+ 5562,
+ 4809,
+ 2556,
+ 0,
+ 2411,
+ 4917,
+ 12395,
+ 6757,
+ 8451,
+ 10292,
+ 12158,
+ 16488,
+ 16799,
+ 21097,
+ 14374,
+ 13194,
+ 13590,
+ 10943,
+ 12824,
+ 12815,
+ 13779,
+ 18042,
+ 32259,
+ 37918,
+ 19416,
+ 10975,
+ 11750,
+ 10424,
+ 9475,
+ ],
+ [
+ 23387,
+ 33841,
+ 27784,
+ 25696,
+ 50506,
+ 21845,
+ 22713,
+ 14201,
+ 15083,
+ 17849,
+ 10961,
+ 6839,
+ 6215,
+ 4611,
+ 2411,
+ 0,
+ 6760,
+ 10232,
+ 4567,
+ 7010,
+ 9607,
+ 12003,
+ 14846,
+ 16408,
+ 19592,
+ 14727,
+ 13336,
+ 14109,
+ 11507,
+ 13611,
+ 14104,
+ 15222,
+ 19237,
+ 34013,
+ 39703,
+ 21271,
+ 12528,
+ 13657,
+ 11907,
+ 10633,
+ ],
+ [
+ 16697,
+ 27315,
+ 21148,
+ 18950,
+ 44539,
+ 15099,
+ 16118,
+ 7599,
+ 9120,
+ 12839,
+ 7744,
+ 8923,
+ 8151,
+ 5630,
+ 4917,
+ 6760,
+ 0,
+ 16982,
+ 9699,
+ 9400,
+ 9302,
+ 9823,
+ 16998,
+ 14534,
+ 21042,
+ 10911,
+ 10190,
+ 9900,
+ 7397,
+ 8758,
+ 8119,
+ 8948,
+ 13353,
+ 27354,
+ 33023,
+ 14542,
+ 6106,
+ 6901,
+ 5609,
+ 5084,
+ ],
+ [
+ 33609,
+ 43964,
+ 37981,
+ 35927,
+ 60103,
+ 32076,
+ 32898,
+ 24411,
+ 25037,
+ 27179,
+ 19889,
+ 13433,
+ 13376,
+ 13586,
+ 12395,
+ 10232,
+ 16982,
+ 0,
+ 8843,
+ 12398,
+ 16193,
+ 19383,
+ 16423,
+ 22583,
+ 20997,
+ 22888,
+ 21194,
+ 22640,
+ 20334,
+ 22636,
+ 23801,
+ 25065,
+ 28675,
+ 44048,
+ 49756,
+ 31426,
+ 22528,
+ 23862,
+ 21861,
+ 20315,
+ ],
+ [
+ 26184,
+ 36944,
+ 30693,
+ 28233,
+ 54208,
+ 24425,
+ 25728,
+ 17259,
+ 18718,
+ 21947,
+ 15350,
+ 11264,
+ 10702,
+ 9157,
+ 6757,
+ 4567,
+ 9699,
+ 8843,
+ 0,
+ 3842,
+ 7518,
+ 10616,
+ 10666,
+ 14237,
+ 15515,
+ 14053,
+ 12378,
+ 13798,
+ 11537,
+ 13852,
+ 15276,
+ 16632,
+ 19957,
+ 35660,
+ 41373,
+ 23361,
+ 14333,
+ 16125,
+ 13624,
+ 11866,
+ ],
+ [
+ 24772,
+ 35689,
+ 29315,
+ 26543,
+ 53557,
+ 22848,
+ 24541,
+ 16387,
+ 18433,
+ 22230,
+ 16403,
+ 13775,
+ 13094,
+ 11005,
+ 8451,
+ 7010,
+ 9400,
+ 12398,
+ 3842,
+ 0,
+ 3795,
+ 7014,
+ 8053,
+ 10398,
+ 12657,
+ 10633,
+ 8889,
+ 10569,
+ 8646,
+ 10938,
+ 12906,
+ 14366,
+ 17106,
+ 33171,
+ 38858,
+ 21390,
+ 12507,
+ 14748,
+ 11781,
+ 9802,
+ ],
+ [
+ 22644,
+ 33569,
+ 27148,
+ 24127,
+ 51878,
+ 20600,
+ 22631,
+ 15050,
+ 17590,
+ 21814,
+ 16975,
+ 15853,
+ 15099,
+ 12681,
+ 10292,
+ 9607,
+ 9302,
+ 16193,
+ 7518,
+ 3795,
+ 0,
+ 3250,
+ 8084,
+ 6873,
+ 11763,
+ 6949,
+ 5177,
+ 7050,
+ 5619,
+ 7730,
+ 10187,
+ 11689,
+ 13792,
+ 30012,
+ 35654,
+ 18799,
+ 10406,
+ 12981,
+ 9718,
+ 7682,
+ ],
+ [
+ 20655,
+ 31481,
+ 25071,
+ 21864,
+ 50074,
+ 18537,
+ 20839,
+ 13999,
+ 16888,
+ 21366,
+ 17517,
+ 17629,
+ 16845,
+ 14285,
+ 12158,
+ 12003,
+ 9823,
+ 19383,
+ 10616,
+ 7014,
+ 3250,
+ 0,
+ 9901,
+ 4746,
+ 12531,
+ 3737,
+ 1961,
+ 4036,
+ 3588,
+ 5109,
+ 7996,
+ 9459,
+ 10846,
+ 27094,
+ 32690,
+ 16451,
+ 8887,
+ 11624,
+ 8304,
+ 6471,
+ ],
+ [
+ 30492,
+ 41360,
+ 34943,
+ 31765,
+ 59849,
+ 28396,
+ 30584,
+ 23134,
+ 25630,
+ 29754,
+ 24357,
+ 21684,
+ 21039,
+ 19044,
+ 16488,
+ 14846,
+ 16998,
+ 16423,
+ 10666,
+ 8053,
+ 8084,
+ 9901,
+ 0,
+ 9363,
+ 4870,
+ 13117,
+ 11575,
+ 13793,
+ 13300,
+ 15009,
+ 17856,
+ 19337,
+ 20454,
+ 36551,
+ 42017,
+ 26352,
+ 18403,
+ 21033,
+ 17737,
+ 15720,
+ ],
+ [
+ 23296,
+ 33760,
+ 27472,
+ 24018,
+ 52645,
+ 21125,
+ 23755,
+ 17899,
+ 20976,
+ 25555,
+ 22176,
+ 22315,
+ 21535,
+ 18996,
+ 16799,
+ 16408,
+ 14534,
+ 22583,
+ 14237,
+ 10398,
+ 6873,
+ 4746,
+ 9363,
+ 0,
+ 10020,
+ 5211,
+ 4685,
+ 6348,
+ 7636,
+ 8010,
+ 11074,
+ 12315,
+ 11926,
+ 27537,
+ 32880,
+ 18634,
+ 12644,
+ 15358,
+ 12200,
+ 10674,
+ ],
+ [
+ 32979,
+ 43631,
+ 37281,
+ 33904,
+ 62415,
+ 30825,
+ 33278,
+ 26460,
+ 29208,
+ 33535,
+ 28627,
+ 26411,
+ 25744,
+ 23644,
+ 21097,
+ 19592,
+ 21042,
+ 20997,
+ 15515,
+ 12657,
+ 11763,
+ 12531,
+ 4870,
+ 10020,
+ 0,
+ 14901,
+ 13738,
+ 15855,
+ 16118,
+ 17348,
+ 20397,
+ 21793,
+ 21936,
+ 37429,
+ 42654,
+ 28485,
+ 21414,
+ 24144,
+ 20816,
+ 18908,
+ ],
+ [
+ 18141,
+ 28730,
+ 22389,
+ 19005,
+ 47544,
+ 15975,
+ 18557,
+ 12894,
+ 16055,
+ 20674,
+ 18093,
+ 19539,
+ 18746,
+ 16138,
+ 14374,
+ 14727,
+ 10911,
+ 22888,
+ 14053,
+ 10633,
+ 6949,
+ 3737,
+ 13117,
+ 5211,
+ 14901,
+ 0,
+ 1777,
+ 1217,
+ 3528,
+ 2896,
+ 5892,
+ 7104,
+ 7338,
+ 23517,
+ 29068,
+ 13583,
+ 7667,
+ 10304,
+ 7330,
+ 6204,
+ ],
+ [
+ 19248,
+ 29976,
+ 23592,
+ 20295,
+ 48689,
+ 17101,
+ 19545,
+ 13251,
+ 16300,
+ 20872,
+ 17672,
+ 18517,
+ 17725,
+ 15126,
+ 13194,
+ 13336,
+ 10190,
+ 21194,
+ 12378,
+ 8889,
+ 5177,
+ 1961,
+ 11575,
+ 4685,
+ 13738,
+ 1777,
+ 0,
+ 2217,
+ 2976,
+ 3610,
+ 6675,
+ 8055,
+ 8965,
+ 25197,
+ 30774,
+ 14865,
+ 8007,
+ 10742,
+ 7532,
+ 6000,
+ ],
+ [
+ 17129,
+ 27803,
+ 21433,
+ 18105,
+ 46560,
+ 14971,
+ 17490,
+ 11680,
+ 14838,
+ 19457,
+ 16955,
+ 18636,
+ 17845,
+ 15240,
+ 13590,
+ 14109,
+ 9900,
+ 22640,
+ 13798,
+ 10569,
+ 7050,
+ 4036,
+ 13793,
+ 6348,
+ 15855,
+ 1217,
+ 2217,
+ 0,
+ 2647,
+ 1686,
+ 4726,
+ 6000,
+ 6810,
+ 23060,
+ 28665,
+ 12674,
+ 6450,
+ 9094,
+ 6117,
+ 5066,
+ ],
+ [
+ 17192,
+ 28076,
+ 21655,
+ 18551,
+ 46567,
+ 15104,
+ 17309,
+ 10455,
+ 13422,
+ 17961,
+ 14735,
+ 16024,
+ 15232,
+ 12625,
+ 10943,
+ 11507,
+ 7397,
+ 20334,
+ 11537,
+ 8646,
+ 5619,
+ 3588,
+ 13300,
+ 7636,
+ 16118,
+ 3528,
+ 2976,
+ 2647,
+ 0,
+ 2320,
+ 4593,
+ 6093,
+ 8479,
+ 24542,
+ 30219,
+ 13194,
+ 5301,
+ 8042,
+ 4735,
+ 3039,
+ ],
+ [
+ 15645,
+ 26408,
+ 20011,
+ 16763,
+ 45086,
+ 13503,
+ 15936,
+ 9997,
+ 13165,
+ 17787,
+ 15510,
+ 17632,
+ 16848,
+ 14264,
+ 12824,
+ 13611,
+ 8758,
+ 22636,
+ 13852,
+ 10938,
+ 7730,
+ 5109,
+ 15009,
+ 8010,
+ 17348,
+ 2896,
+ 3610,
+ 1686,
+ 2320,
+ 0,
+ 3086,
+ 4444,
+ 6169,
+ 22301,
+ 27963,
+ 11344,
+ 4780,
+ 7408,
+ 4488,
+ 3721,
+ ],
+ [
+ 12658,
+ 23504,
+ 17087,
+ 13958,
+ 42083,
+ 10544,
+ 12881,
+ 7194,
+ 10430,
+ 15048,
+ 13694,
+ 16948,
+ 16197,
+ 13736,
+ 12815,
+ 14104,
+ 8119,
+ 23801,
+ 15276,
+ 12906,
+ 10187,
+ 7996,
+ 17856,
+ 11074,
+ 20397,
+ 5892,
+ 6675,
+ 4726,
+ 4593,
+ 3086,
+ 0,
+ 1501,
+ 5239,
+ 20390,
+ 26101,
+ 8611,
+ 2418,
+ 4580,
+ 2599,
+ 3496,
+ ],
+ [
+ 11210,
+ 22025,
+ 15612,
+ 12459,
+ 40648,
+ 9080,
+ 11498,
+ 6574,
+ 9813,
+ 14372,
+ 13768,
+ 17587,
+ 16859,
+ 14482,
+ 13779,
+ 15222,
+ 8948,
+ 25065,
+ 16632,
+ 14366,
+ 11689,
+ 9459,
+ 19337,
+ 12315,
+ 21793,
+ 7104,
+ 8055,
+ 6000,
+ 6093,
+ 4444,
+ 1501,
+ 0,
+ 4608,
+ 19032,
+ 24747,
+ 7110,
+ 2860,
+ 4072,
+ 3355,
+ 4772,
+ ],
+ [
+ 12094,
+ 22000,
+ 15872,
+ 12296,
+ 40971,
+ 9983,
+ 12944,
+ 10678,
+ 13777,
+ 18115,
+ 18317,
+ 22131,
+ 21391,
+ 18958,
+ 18042,
+ 19237,
+ 13353,
+ 28675,
+ 19957,
+ 17106,
+ 13792,
+ 10846,
+ 20454,
+ 11926,
+ 21936,
+ 7338,
+ 8965,
+ 6810,
+ 8479,
+ 6169,
+ 5239,
+ 4608,
+ 0,
+ 16249,
+ 21866,
+ 7146,
+ 7403,
+ 8446,
+ 7773,
+ 8614,
+ ],
+ [
+ 13175,
+ 13197,
+ 11653,
+ 10370,
+ 29929,
+ 13435,
+ 14711,
+ 20959,
+ 22300,
+ 24280,
+ 28831,
+ 34799,
+ 34211,
+ 32292,
+ 32259,
+ 34013,
+ 27354,
+ 44048,
+ 35660,
+ 33171,
+ 30012,
+ 27094,
+ 36551,
+ 27537,
+ 37429,
+ 23517,
+ 25197,
+ 23060,
+ 24542,
+ 22301,
+ 20390,
+ 19032,
+ 16249,
+ 0,
+ 5714,
+ 12901,
+ 21524,
+ 20543,
+ 22186,
+ 23805,
+ ],
+ [
+ 18162,
+ 14936,
+ 15666,
+ 15331,
+ 28493,
+ 18755,
+ 19589,
+ 26458,
+ 27564,
+ 29101,
+ 34148,
+ 40296,
+ 39731,
+ 37879,
+ 37918,
+ 39703,
+ 33023,
+ 49756,
+ 41373,
+ 38858,
+ 35654,
+ 32690,
+ 42017,
+ 32880,
+ 42654,
+ 29068,
+ 30774,
+ 28665,
+ 30219,
+ 27963,
+ 26101,
+ 24747,
+ 21866,
+ 5714,
+ 0,
+ 18516,
+ 27229,
+ 26181,
+ 27895,
+ 29519,
+ ],
+ [
+ 4968,
+ 15146,
+ 8842,
+ 5430,
+ 34015,
+ 2947,
+ 5993,
+ 8180,
+ 10126,
+ 13400,
+ 16326,
+ 21953,
+ 21345,
+ 19391,
+ 19416,
+ 21271,
+ 14542,
+ 31426,
+ 23361,
+ 21390,
+ 18799,
+ 16451,
+ 26352,
+ 18634,
+ 28485,
+ 13583,
+ 14865,
+ 12674,
+ 13194,
+ 11344,
+ 8611,
+ 7110,
+ 7146,
+ 12901,
+ 18516,
+ 0,
+ 9029,
+ 7668,
+ 9742,
+ 11614,
+ ],
+ [
+ 12308,
+ 23246,
+ 16843,
+ 14044,
+ 41473,
+ 10344,
+ 12227,
+ 5255,
+ 8388,
+ 13008,
+ 11276,
+ 14739,
+ 14006,
+ 11621,
+ 10975,
+ 12528,
+ 6106,
+ 22528,
+ 14333,
+ 12507,
+ 10406,
+ 8887,
+ 18403,
+ 12644,
+ 21414,
+ 7667,
+ 8007,
+ 6450,
+ 5301,
+ 4780,
+ 2418,
+ 2860,
+ 7403,
+ 21524,
+ 27229,
+ 9029,
+ 0,
+ 2747,
+ 726,
+ 2749,
+ ],
+ [
+ 10084,
+ 20956,
+ 14618,
+ 12135,
+ 38935,
+ 8306,
+ 9793,
+ 2615,
+ 5850,
+ 10467,
+ 9918,
+ 14568,
+ 13907,
+ 11803,
+ 11750,
+ 13657,
+ 6901,
+ 23862,
+ 16125,
+ 14748,
+ 12981,
+ 11624,
+ 21033,
+ 15358,
+ 24144,
+ 10304,
+ 10742,
+ 9094,
+ 8042,
+ 7408,
+ 4580,
+ 4072,
+ 8446,
+ 20543,
+ 26181,
+ 7668,
+ 2747,
+ 0,
+ 3330,
+ 5313,
+ ],
+ [
+ 13026,
+ 23963,
+ 17563,
+ 14771,
+ 42160,
+ 11069,
+ 12925,
+ 5730,
+ 8778,
+ 13375,
+ 11235,
+ 14366,
+ 13621,
+ 11188,
+ 10424,
+ 11907,
+ 5609,
+ 21861,
+ 13624,
+ 11781,
+ 9718,
+ 8304,
+ 17737,
+ 12200,
+ 20816,
+ 7330,
+ 7532,
+ 6117,
+ 4735,
+ 4488,
+ 2599,
+ 3355,
+ 7773,
+ 22186,
+ 27895,
+ 9742,
+ 726,
+ 3330,
+ 0,
+ 2042,
+ ],
+ [
+ 15056,
+ 25994,
+ 19589,
+ 16743,
+ 44198,
+ 13078,
+ 14967,
+ 7552,
+ 10422,
+ 14935,
+ 11891,
+ 14002,
+ 13225,
+ 10671,
+ 9475,
+ 10633,
+ 5084,
+ 20315,
+ 11866,
+ 9802,
+ 7682,
+ 6471,
+ 15720,
+ 10674,
+ 18908,
+ 6204,
+ 6000,
+ 5066,
+ 3039,
+ 3721,
+ 3496,
+ 4772,
+ 8614,
+ 23805,
+ 29519,
+ 11614,
+ 2749,
+ 5313,
+ 2042,
+ 0,
+ ],
+] # yapf: disable
MAX_DISTANCE = 80_000
@@ -67,99 +1707,97 @@
# Create a console solution printer.
def print_solution(manager, routing, assignment):
- """Prints assignment on console."""
- print(f'Objective: {assignment.ObjectiveValue()}')
- # Display dropped nodes.
- dropped_nodes = 'Dropped nodes:'
- for index in range(routing.Size()):
- if routing.IsStart(index) or routing.IsEnd(index):
- continue
- if assignment.Value(routing.NextVar(index)) == index:
- node = manager.IndexToNode(index)
- dropped_nodes += f' {node}({VISIT_VALUES[node]})'
- print(dropped_nodes)
- # Display routes
- index = routing.Start(0)
- plan_output = 'Route for vehicle 0:\n'
- route_distance = 0
- value_collected = 0
- while not routing.IsEnd(index):
- node = manager.IndexToNode(index)
- value_collected += VISIT_VALUES[node]
- plan_output += f' {node} ->'
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
-
- plan_output += f' {manager.IndexToNode(index)}\n'
- plan_output += f'Distance of the route: {route_distance}m\n'
- plan_output += f'Value collected: {value_collected}/{sum(VISIT_VALUES)}\n'
- print(plan_output)
+ """Prints assignment on console."""
+ print(f'Objective: {assignment.ObjectiveValue()}')
+ # Display dropped nodes.
+ dropped_nodes = 'Dropped nodes:'
+ for index in range(routing.Size()):
+ if routing.IsStart(index) or routing.IsEnd(index):
+ continue
+ if assignment.Value(routing.NextVar(index)) == index:
+ node = manager.IndexToNode(index)
+ dropped_nodes += f' {node}({VISIT_VALUES[node]})'
+ print(dropped_nodes)
+ # Display routes
+ index = routing.Start(0)
+ plan_output = 'Route for vehicle 0:\n'
+ route_distance = 0
+ value_collected = 0
+ while not routing.IsEnd(index):
+ node = manager.IndexToNode(index)
+ value_collected += VISIT_VALUES[node]
+ plan_output += f' {node} ->'
+ previous_index = index
+ index = assignment.Value(routing.NextVar(index))
+ route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
+
+ plan_output += f' {manager.IndexToNode(index)}\n'
+ plan_output += f'Distance of the route: {route_distance}m\n'
+ plan_output += f'Value collected: {value_collected}/{sum(VISIT_VALUES)}\n'
+ print(plan_output)
def main():
- """Entry point of the program."""
- num_nodes = len(DISTANCE_MATRIX)
- print(f'Num nodes = {num_nodes}')
- num_vehicles = 1
- depot = 0
- all_nodes = range(num_nodes)
-
- # Create the routing index manager.
- manager = pywrapcp.RoutingIndexManager(
- num_nodes,
- num_vehicles,
- depot)
-
- # Create routing model.
- routing = pywrapcp.RoutingModel(manager)
-
- # Create and register a transit callback.
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return DISTANCE_MATRIX[from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
-
- # Define cost of each arc.
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
-
- # Limit Vehicle distance.
- dimension_name = 'Distance'
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- MAX_DISTANCE, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name)
- #distance_dimension = routing.GetDimensionOrDie(dimension_name)
- #distance_dimension.SetGlobalSpanCostCoefficient(100)
-
- # Allow to drop nodes.
- for node in range(1, num_nodes):
- routing.AddDisjunction(
- [manager.NodeToIndex(node)],
- VISIT_VALUES[node])
-
- # Setting first solution heuristic.
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
- search_parameters.time_limit.FromSeconds(15)
- #search_parameters.log_search = True
-
- # Solve the problem.
- assignment = routing.SolveWithParameters(search_parameters)
-
- # Print solution on console.
- if assignment:
- print_solution(manager, routing, assignment)
+ """Entry point of the program."""
+ num_nodes = len(DISTANCE_MATRIX)
+ print(f'Num nodes = {num_nodes}')
+ num_vehicles = 1
+ depot = 0
+ all_nodes = range(num_nodes)
+
+ # Create the routing index manager.
+ manager = pywraprouting.RoutingIndexManager(num_nodes, num_vehicles, depot)
+
+ # Create routing model.
+ routing = pywraprouting.RoutingModel(manager)
+
+ # Create and register a transit callback.
+ def distance_callback(from_index, to_index):
+ """Returns the distance between the two nodes."""
+ # Convert from routing variable Index to distance matrix NodeIndex.
+ from_node = manager.IndexToNode(from_index)
+ to_node = manager.IndexToNode(to_index)
+ return DISTANCE_MATRIX[from_node][to_node]
+
+ transit_callback_index = routing.RegisterTransitCallback(distance_callback)
+
+ # Define cost of each arc.
+ routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
+
+    # Limit vehicle distance.
+ dimension_name = 'Distance'
+ routing.AddDimension(
+ transit_callback_index,
+ 0, # no slack
+ MAX_DISTANCE, # vehicle maximum travel distance
+ True, # start cumul to zero
+ dimension_name,
+ )
+ # distance_dimension = routing.GetDimensionOrDie(dimension_name)
+ # distance_dimension.SetGlobalSpanCostCoefficient(100)
+
+    # Allow dropping nodes.
+ for node in range(1, num_nodes):
+ routing.AddDisjunction([manager.NodeToIndex(node)], VISIT_VALUES[node])
+
+ # Setting first solution heuristic.
+ search_parameters = pywraprouting.DefaultRoutingSearchParameters()
+ search_parameters.first_solution_strategy = (
+ enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
+ )
+ search_parameters.local_search_metaheuristic = (
+ enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
+ )
+ search_parameters.time_limit.FromSeconds(15)
+ # search_parameters.log_search = True
+
+ # Solve the problem.
+ assignment = routing.SolveWithParameters(search_parameters)
+
+ # Print solution on console.
+ if assignment:
+ print_solution(manager, routing, assignment)
if __name__ == '__main__':
- main()
+ main()
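The prize_collecting_tsp.py hunks above replace the ortools.constraint_solver imports (pywrapcp, routing_enums_pb2) with ortools.routing (pywraprouting, enums_pb2); the modeling calls themselves keep their names. A minimal sketch of the migrated API on a toy 4-node TSP, assuming an OR-Tools build that ships the ortools.routing wrapper used in the diff (the distance matrix here is illustrative only):

    from ortools.routing import enums_pb2
    from ortools.routing import pywraprouting

    matrix = [
        [0, 2, 9, 10],
        [1, 0, 6, 4],
        [15, 7, 0, 8],
        [6, 3, 12, 0],
    ]
    manager = pywraprouting.RoutingIndexManager(len(matrix), 1, 0)  # 1 vehicle, depot 0
    routing = pywraprouting.RoutingModel(manager)

    def distance_callback(from_index, to_index):
        # Convert routing variable indices back to matrix node indices.
        return matrix[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]

    transit = routing.RegisterTransitCallback(distance_callback)
    routing.SetArcCostEvaluatorOfAllVehicles(transit)

    params = pywraprouting.DefaultRoutingSearchParameters()
    params.first_solution_strategy = enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
    assignment = routing.SolveWithParameters(params)
    if assignment:
        print("Route cost:", assignment.ObjectiveValue())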
diff --git a/examples/python/prize_collecting_tsp_sat.py b/examples/python/prize_collecting_tsp_sat.py
index 11ba7a0879a..2e351b37c7d 100644
--- a/examples/python/prize_collecting_tsp_sat.py
+++ b/examples/python/prize_collecting_tsp_sat.py
@@ -77,110 +77,110 @@ def print_solution(
used_arcs: dict[tuple[int, int], cp_model.IntVar],
num_nodes: int,
) -> None:
- """Prints solution on console."""
- # Display dropped nodes.
- dropped_nodes = "Dropped nodes:"
- for i in range(num_nodes):
- if i == 0:
- continue
- if not solver.boolean_value(visited_nodes[i]):
- dropped_nodes += f" {i}({VISIT_VALUES[i]})"
- print(dropped_nodes)
- # Display routes
- current_node = 0
- plan_output = "Route for vehicle 0:\n"
- route_distance = 0
- value_collected = 0
- route_is_finished = False
- while not route_is_finished:
- value_collected += VISIT_VALUES[current_node]
- plan_output += f" {current_node} ->"
- # find next node
- for node in range(num_nodes):
- if node == current_node:
- continue
- if solver.boolean_value(used_arcs[current_node, node]):
- route_distance += DISTANCE_MATRIX[current_node][node]
- current_node = node
- if current_node == 0:
- route_is_finished = True
- break
- plan_output += f" {current_node}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- plan_output += f"value collected: {value_collected}/{sum(VISIT_VALUES)}\n"
- print(plan_output)
+ """Prints solution on console."""
+ # Display dropped nodes.
+ dropped_nodes = "Dropped nodes:"
+ for i in range(num_nodes):
+ if i == 0:
+ continue
+ if not solver.boolean_value(visited_nodes[i]):
+ dropped_nodes += f" {i}({VISIT_VALUES[i]})"
+ print(dropped_nodes)
+ # Display routes
+ current_node = 0
+ plan_output = "Route for vehicle 0:\n"
+ route_distance = 0
+ value_collected = 0
+ route_is_finished = False
+ while not route_is_finished:
+ value_collected += VISIT_VALUES[current_node]
+ plan_output += f" {current_node} ->"
+ # find next node
+ for node in range(num_nodes):
+ if node == current_node:
+ continue
+ if solver.boolean_value(used_arcs[current_node, node]):
+ route_distance += DISTANCE_MATRIX[current_node][node]
+ current_node = node
+ if current_node == 0:
+ route_is_finished = True
+ break
+ plan_output += f" {current_node}\n"
+ plan_output += f"Distance of the route: {route_distance}m\n"
+ plan_output += f"value collected: {value_collected}/{sum(VISIT_VALUES)}\n"
+ print(plan_output)
def prize_collecting_tsp():
- """Entry point of the program."""
- num_nodes = len(DISTANCE_MATRIX)
- all_nodes = range(num_nodes)
- print(f"Num nodes = {num_nodes}")
-
- # Model.
- model = cp_model.CpModel()
-
- obj_vars = []
- obj_coeffs = []
- visited_nodes = []
- used_arcs = {}
-
- # Create the circuit constraint.
- arcs = []
- for i in all_nodes:
- is_visited = model.new_bool_var(f"{i} is visited")
- arcs.append((i, i, ~is_visited))
-
- obj_vars.append(is_visited)
- obj_coeffs.append(VISIT_VALUES[i])
- visited_nodes.append(is_visited)
-
- for j in all_nodes:
- if i == j:
- used_arcs[i, j] = ~is_visited
- continue
- arc_is_used = model.new_bool_var(f"{j} follows {i}")
- arcs.append((i, j, arc_is_used))
-
- obj_vars.append(arc_is_used)
- obj_coeffs.append(-DISTANCE_MATRIX[i][j])
- used_arcs[i, j] = arc_is_used
-
- model.add_circuit(arcs)
-
- # Node 0 must be visited.
- model.add(visited_nodes[0] == 1)
-
- # limit the route distance
- model.add(
- sum(
- used_arcs[i, j] * DISTANCE_MATRIX[i][j]
- for i in all_nodes
- for j in all_nodes
- )
- <= MAX_DISTANCE
- )
-
- # Maximize visited node values minus the travelled distance.
- model.maximize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
-
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- # To benefit from the linearization of the circuit constraint.
- solver.parameters.max_time_in_seconds = 15.0
- solver.parameters.num_search_workers = 8
- solver.parameters.log_search_progress = True
-
- status = solver.solve(model)
- if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
- print_solution(solver, visited_nodes, used_arcs, num_nodes)
+ """Entry point of the program."""
+ num_nodes = len(DISTANCE_MATRIX)
+ all_nodes = range(num_nodes)
+ print(f"Num nodes = {num_nodes}")
+
+ # Model.
+ model = cp_model.CpModel()
+
+ obj_vars = []
+ obj_coeffs = []
+ visited_nodes = []
+ used_arcs = {}
+
+ # Create the circuit constraint.
+ arcs = []
+ for i in all_nodes:
+ is_visited = model.new_bool_var(f"{i} is visited")
+ arcs.append((i, i, ~is_visited))
+
+ obj_vars.append(is_visited)
+ obj_coeffs.append(VISIT_VALUES[i])
+ visited_nodes.append(is_visited)
+
+ for j in all_nodes:
+ if i == j:
+ used_arcs[i, j] = ~is_visited
+ continue
+ arc_is_used = model.new_bool_var(f"{j} follows {i}")
+ arcs.append((i, j, arc_is_used))
+
+ obj_vars.append(arc_is_used)
+ obj_coeffs.append(-DISTANCE_MATRIX[i][j])
+ used_arcs[i, j] = arc_is_used
+
+ model.add_circuit(arcs)
+
+ # Node 0 must be visited.
+ model.add(visited_nodes[0] == 1)
+
+    # Limit the route distance.
+ model.add(
+ sum(
+ used_arcs[i, j] * DISTANCE_MATRIX[i][j]
+ for i in all_nodes
+ for j in all_nodes
+ )
+ <= MAX_DISTANCE
+ )
+
+ # Maximize visited node values minus the travelled distance.
+ model.maximize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
+
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ # To benefit from the linearization of the circuit constraint.
+ solver.parameters.max_time_in_seconds = 15.0
+ solver.parameters.num_search_workers = 8
+ solver.parameters.log_search_progress = True
+
+ status = solver.solve(model)
+ if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
+ print_solution(solver, visited_nodes, used_arcs, num_nodes)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- prize_collecting_tsp()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ prize_collecting_tsp()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
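prize_collecting_tsp_sat.py expresses the same prize-collecting idea with CP-SAT's add_circuit: every node gets a self-loop literal (i, i, ~is_visited), so an active self-loop drops the node from the tour instead of using a routing disjunction. A minimal sketch of that self-loop trick on a toy 3-node instance (distances are illustrative; the depot is forced onto the route):

    from ortools.sat.python import cp_model

    dist = [[0, 4, 6], [4, 0, 5], [6, 5, 0]]
    model = cp_model.CpModel()
    visited = [model.new_bool_var(f"visit_{i}") for i in range(3)]
    arcs = []
    distance_terms = []
    for i in range(3):
        arcs.append((i, i, ~visited[i]))  # active self-loop == node i is skipped
        for j in range(3):
            if i == j:
                continue
            lit = model.new_bool_var(f"arc_{i}_{j}")
            arcs.append((i, j, lit))
            distance_terms.append(lit * dist[i][j])
    model.add_circuit(arcs)
    model.add(visited[0] == 1)  # the depot must stay on the route
    model.minimize(sum(distance_terms))
    solver = cp_model.CpSolver()
    status = solver.solve(model)
    print(solver.status_name(status), solver.objective_value)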
diff --git a/examples/python/prize_collecting_vrp.py b/examples/python/prize_collecting_vrp.py
index 493f763e839..0e3cf0a290b 100755
--- a/examples/python/prize_collecting_vrp.py
+++ b/examples/python/prize_collecting_vrp.py
@@ -13,51 +13,1691 @@
# limitations under the License.
"""Simple prize collecting VRP problem with a max distance."""
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
+from ortools.routing import enums_pb2
+from ortools.routing import pywraprouting
DISTANCE_MATRIX = [
- [0, 10938, 4542, 2835, 29441, 2171, 1611, 9208, 9528, 11111, 16120, 22606, 22127, 20627, 21246, 23387, 16697, 33609, 26184, 24772, 22644, 20655, 30492, 23296, 32979, 18141, 19248, 17129, 17192, 15645, 12658, 11210, 12094, 13175, 18162, 4968, 12308, 10084, 13026, 15056],
- [10938, 0, 6422, 9742, 18988, 12974, 11216, 19715, 19004, 18271, 25070, 31971, 31632, 30571, 31578, 33841, 27315, 43964, 36944, 35689, 33569, 31481, 41360, 33760, 43631, 28730, 29976, 27803, 28076, 26408, 23504, 22025, 22000, 13197, 14936, 15146, 23246, 20956, 23963, 25994],
- [4542, 6422, 0, 3644, 25173, 6552, 5092, 13584, 13372, 13766, 19805, 26537, 26117, 24804, 25590, 27784, 21148, 37981, 30693, 29315, 27148, 25071, 34943, 27472, 37281, 22389, 23592, 21433, 21655, 20011, 17087, 15612, 15872, 11653, 15666, 8842, 16843, 14618, 17563, 19589],
- [2835, 9742, 3644, 0, 28681, 3851, 4341, 11660, 12294, 13912, 18893, 25283, 24777, 23173, 23636, 25696, 18950, 35927, 28233, 26543, 24127, 21864, 31765, 24018, 33904, 19005, 20295, 18105, 18551, 16763, 13958, 12459, 12296, 10370, 15331, 5430, 14044, 12135, 14771, 16743],
- [29441, 18988, 25173, 28681, 0, 31590, 29265, 37173, 35501, 32929, 40239, 47006, 46892, 46542, 48112, 50506, 44539, 60103, 54208, 53557, 51878, 50074, 59849, 52645, 62415, 47544, 48689, 46560, 46567, 45086, 42083, 40648, 40971, 29929, 28493, 34015, 41473, 38935, 42160, 44198],
- [2171, 12974, 6552, 3851, 31590, 0, 3046, 7856, 8864, 11330, 15411, 21597, 21065, 19382, 19791, 21845, 15099, 32076, 24425, 22848, 20600, 18537, 28396, 21125, 30825, 15975, 17101, 14971, 15104, 13503, 10544, 9080, 9983, 13435, 18755, 2947, 10344, 8306, 11069, 13078],
- [1611, 11216, 5092, 4341, 29265, 3046, 0, 8526, 8368, 9573, 14904, 21529, 21085, 19719, 20504, 22713, 16118, 32898, 25728, 24541, 22631, 20839, 30584, 23755, 33278, 18557, 19545, 17490, 17309, 15936, 12881, 11498, 12944, 14711, 19589, 5993, 12227, 9793, 12925, 14967],
- [9208, 19715, 13584, 11660, 37173, 7856, 8526, 0, 3248, 7855, 8245, 13843, 13272, 11526, 12038, 14201, 7599, 24411, 17259, 16387, 15050, 13999, 23134, 17899, 26460, 12894, 13251, 11680, 10455, 9997, 7194, 6574, 10678, 20959, 26458, 8180, 5255, 2615, 5730, 7552],
- [9528, 19004, 13372, 12294, 35501, 8864, 8368, 3248, 0, 4626, 6598, 13168, 12746, 11567, 12731, 15083, 9120, 25037, 18718, 18433, 17590, 16888, 25630, 20976, 29208, 16055, 16300, 14838, 13422, 13165, 10430, 9813, 13777, 22300, 27564, 10126, 8388, 5850, 8778, 10422],
- [11111, 18271, 13766, 13912, 32929, 11330, 9573, 7855, 4626, 0, 7318, 14185, 14005, 13655, 15438, 17849, 12839, 27179, 21947, 22230, 21814, 21366, 29754, 25555, 33535, 20674, 20872, 19457, 17961, 17787, 15048, 14372, 18115, 24280, 29101, 13400, 13008, 10467, 13375, 14935],
- [16120, 25070, 19805, 18893, 40239, 15411, 14904, 8245, 6598, 7318, 0, 6939, 6702, 6498, 8610, 10961, 7744, 19889, 15350, 16403, 16975, 17517, 24357, 22176, 28627, 18093, 17672, 16955, 14735, 15510, 13694, 13768, 18317, 28831, 34148, 16326, 11276, 9918, 11235, 11891],
- [22606, 31971, 26537, 25283, 47006, 21597, 21529, 13843, 13168, 14185, 6939, 0, 793, 3401, 5562, 6839, 8923, 13433, 11264, 13775, 15853, 17629, 21684, 22315, 26411, 19539, 18517, 18636, 16024, 17632, 16948, 17587, 22131, 34799, 40296, 21953, 14739, 14568, 14366, 14002],
- [22127, 31632, 26117, 24777, 46892, 21065, 21085, 13272, 12746, 14005, 6702, 793, 0, 2608, 4809, 6215, 8151, 13376, 10702, 13094, 15099, 16845, 21039, 21535, 25744, 18746, 17725, 17845, 15232, 16848, 16197, 16859, 21391, 34211, 39731, 21345, 14006, 13907, 13621, 13225],
- [20627, 30571, 24804, 23173, 46542, 19382, 19719, 11526, 11567, 13655, 6498, 3401, 2608, 0, 2556, 4611, 5630, 13586, 9157, 11005, 12681, 14285, 19044, 18996, 23644, 16138, 15126, 15240, 12625, 14264, 13736, 14482, 18958, 32292, 37879, 19391, 11621, 11803, 11188, 10671],
- [21246, 31578, 25590, 23636, 48112, 19791, 20504, 12038, 12731, 15438, 8610, 5562, 4809, 2556, 0, 2411, 4917, 12395, 6757, 8451, 10292, 12158, 16488, 16799, 21097, 14374, 13194, 13590, 10943, 12824, 12815, 13779, 18042, 32259, 37918, 19416, 10975, 11750, 10424, 9475],
- [23387, 33841, 27784, 25696, 50506, 21845, 22713, 14201, 15083, 17849, 10961, 6839, 6215, 4611, 2411, 0, 6760, 10232, 4567, 7010, 9607, 12003, 14846, 16408, 19592, 14727, 13336, 14109, 11507, 13611, 14104, 15222, 19237, 34013, 39703, 21271, 12528, 13657, 11907, 10633],
- [16697, 27315, 21148, 18950, 44539, 15099, 16118, 7599, 9120, 12839, 7744, 8923, 8151, 5630, 4917, 6760, 0, 16982, 9699, 9400, 9302, 9823, 16998, 14534, 21042, 10911, 10190, 9900, 7397, 8758, 8119, 8948, 13353, 27354, 33023, 14542, 6106, 6901, 5609, 5084],
- [33609, 43964, 37981, 35927, 60103, 32076, 32898, 24411, 25037, 27179, 19889, 13433, 13376, 13586, 12395, 10232, 16982, 0, 8843, 12398, 16193, 19383, 16423, 22583, 20997, 22888, 21194, 22640, 20334, 22636, 23801, 25065, 28675, 44048, 49756, 31426, 22528, 23862, 21861, 20315],
- [26184, 36944, 30693, 28233, 54208, 24425, 25728, 17259, 18718, 21947, 15350, 11264, 10702, 9157, 6757, 4567, 9699, 8843, 0, 3842, 7518, 10616, 10666, 14237, 15515, 14053, 12378, 13798, 11537, 13852, 15276, 16632, 19957, 35660, 41373, 23361, 14333, 16125, 13624, 11866],
- [24772, 35689, 29315, 26543, 53557, 22848, 24541, 16387, 18433, 22230, 16403, 13775, 13094, 11005, 8451, 7010, 9400, 12398, 3842, 0, 3795, 7014, 8053, 10398, 12657, 10633, 8889, 10569, 8646, 10938, 12906, 14366, 17106, 33171, 38858, 21390, 12507, 14748, 11781, 9802],
- [22644, 33569, 27148, 24127, 51878, 20600, 22631, 15050, 17590, 21814, 16975, 15853, 15099, 12681, 10292, 9607, 9302, 16193, 7518, 3795, 0, 3250, 8084, 6873, 11763, 6949, 5177, 7050, 5619, 7730, 10187, 11689, 13792, 30012, 35654, 18799, 10406, 12981, 9718, 7682],
- [20655, 31481, 25071, 21864, 50074, 18537, 20839, 13999, 16888, 21366, 17517, 17629, 16845, 14285, 12158, 12003, 9823, 19383, 10616, 7014, 3250, 0, 9901, 4746, 12531, 3737, 1961, 4036, 3588, 5109, 7996, 9459, 10846, 27094, 32690, 16451, 8887, 11624, 8304, 6471],
- [30492, 41360, 34943, 31765, 59849, 28396, 30584, 23134, 25630, 29754, 24357, 21684, 21039, 19044, 16488, 14846, 16998, 16423, 10666, 8053, 8084, 9901, 0, 9363, 4870, 13117, 11575, 13793, 13300, 15009, 17856, 19337, 20454, 36551, 42017, 26352, 18403, 21033, 17737, 15720],
- [23296, 33760, 27472, 24018, 52645, 21125, 23755, 17899, 20976, 25555, 22176, 22315, 21535, 18996, 16799, 16408, 14534, 22583, 14237, 10398, 6873, 4746, 9363, 0, 10020, 5211, 4685, 6348, 7636, 8010, 11074, 12315, 11926, 27537, 32880, 18634, 12644, 15358, 12200, 10674],
- [32979, 43631, 37281, 33904, 62415, 30825, 33278, 26460, 29208, 33535, 28627, 26411, 25744, 23644, 21097, 19592, 21042, 20997, 15515, 12657, 11763, 12531, 4870, 10020, 0, 14901, 13738, 15855, 16118, 17348, 20397, 21793, 21936, 37429, 42654, 28485, 21414, 24144, 20816, 18908],
- [18141, 28730, 22389, 19005, 47544, 15975, 18557, 12894, 16055, 20674, 18093, 19539, 18746, 16138, 14374, 14727, 10911, 22888, 14053, 10633, 6949, 3737, 13117, 5211, 14901, 0, 1777, 1217, 3528, 2896, 5892, 7104, 7338, 23517, 29068, 13583, 7667, 10304, 7330, 6204],
- [19248, 29976, 23592, 20295, 48689, 17101, 19545, 13251, 16300, 20872, 17672, 18517, 17725, 15126, 13194, 13336, 10190, 21194, 12378, 8889, 5177, 1961, 11575, 4685, 13738, 1777, 0, 2217, 2976, 3610, 6675, 8055, 8965, 25197, 30774, 14865, 8007, 10742, 7532, 6000],
- [17129, 27803, 21433, 18105, 46560, 14971, 17490, 11680, 14838, 19457, 16955, 18636, 17845, 15240, 13590, 14109, 9900, 22640, 13798, 10569, 7050, 4036, 13793, 6348, 15855, 1217, 2217, 0, 2647, 1686, 4726, 6000, 6810, 23060, 28665, 12674, 6450, 9094, 6117, 5066],
- [17192, 28076, 21655, 18551, 46567, 15104, 17309, 10455, 13422, 17961, 14735, 16024, 15232, 12625, 10943, 11507, 7397, 20334, 11537, 8646, 5619, 3588, 13300, 7636, 16118, 3528, 2976, 2647, 0, 2320, 4593, 6093, 8479, 24542, 30219, 13194, 5301, 8042, 4735, 3039],
- [15645, 26408, 20011, 16763, 45086, 13503, 15936, 9997, 13165, 17787, 15510, 17632, 16848, 14264, 12824, 13611, 8758, 22636, 13852, 10938, 7730, 5109, 15009, 8010, 17348, 2896, 3610, 1686, 2320, 0, 3086, 4444, 6169, 22301, 27963, 11344, 4780, 7408, 4488, 3721],
- [12658, 23504, 17087, 13958, 42083, 10544, 12881, 7194, 10430, 15048, 13694, 16948, 16197, 13736, 12815, 14104, 8119, 23801, 15276, 12906, 10187, 7996, 17856, 11074, 20397, 5892, 6675, 4726, 4593, 3086, 0, 1501, 5239, 20390, 26101, 8611, 2418, 4580, 2599, 3496],
- [11210, 22025, 15612, 12459, 40648, 9080, 11498, 6574, 9813, 14372, 13768, 17587, 16859, 14482, 13779, 15222, 8948, 25065, 16632, 14366, 11689, 9459, 19337, 12315, 21793, 7104, 8055, 6000, 6093, 4444, 1501, 0, 4608, 19032, 24747, 7110, 2860, 4072, 3355, 4772],
- [12094, 22000, 15872, 12296, 40971, 9983, 12944, 10678, 13777, 18115, 18317, 22131, 21391, 18958, 18042, 19237, 13353, 28675, 19957, 17106, 13792, 10846, 20454, 11926, 21936, 7338, 8965, 6810, 8479, 6169, 5239, 4608, 0, 16249, 21866, 7146, 7403, 8446, 7773, 8614],
- [13175, 13197, 11653, 10370, 29929, 13435, 14711, 20959, 22300, 24280, 28831, 34799, 34211, 32292, 32259, 34013, 27354, 44048, 35660, 33171, 30012, 27094, 36551, 27537, 37429, 23517, 25197, 23060, 24542, 22301, 20390, 19032, 16249, 0, 5714, 12901, 21524, 20543, 22186, 23805],
- [18162, 14936, 15666, 15331, 28493, 18755, 19589, 26458, 27564, 29101, 34148, 40296, 39731, 37879, 37918, 39703, 33023, 49756, 41373, 38858, 35654, 32690, 42017, 32880, 42654, 29068, 30774, 28665, 30219, 27963, 26101, 24747, 21866, 5714, 0, 18516, 27229, 26181, 27895, 29519],
- [4968, 15146, 8842, 5430, 34015, 2947, 5993, 8180, 10126, 13400, 16326, 21953, 21345, 19391, 19416, 21271, 14542, 31426, 23361, 21390, 18799, 16451, 26352, 18634, 28485, 13583, 14865, 12674, 13194, 11344, 8611, 7110, 7146, 12901, 18516, 0, 9029, 7668, 9742, 11614],
- [12308, 23246, 16843, 14044, 41473, 10344, 12227, 5255, 8388, 13008, 11276, 14739, 14006, 11621, 10975, 12528, 6106, 22528, 14333, 12507, 10406, 8887, 18403, 12644, 21414, 7667, 8007, 6450, 5301, 4780, 2418, 2860, 7403, 21524, 27229, 9029, 0, 2747, 726, 2749],
- [10084, 20956, 14618, 12135, 38935, 8306, 9793, 2615, 5850, 10467, 9918, 14568, 13907, 11803, 11750, 13657, 6901, 23862, 16125, 14748, 12981, 11624, 21033, 15358, 24144, 10304, 10742, 9094, 8042, 7408, 4580, 4072, 8446, 20543, 26181, 7668, 2747, 0, 3330, 5313],
- [13026, 23963, 17563, 14771, 42160, 11069, 12925, 5730, 8778, 13375, 11235, 14366, 13621, 11188, 10424, 11907, 5609, 21861, 13624, 11781, 9718, 8304, 17737, 12200, 20816, 7330, 7532, 6117, 4735, 4488, 2599, 3355, 7773, 22186, 27895, 9742, 726, 3330, 0, 2042],
- [15056, 25994, 19589, 16743, 44198, 13078, 14967, 7552, 10422, 14935, 11891, 14002, 13225, 10671, 9475, 10633, 5084, 20315, 11866, 9802, 7682, 6471, 15720, 10674, 18908, 6204, 6000, 5066, 3039, 3721, 3496, 4772, 8614, 23805, 29519, 11614, 2749, 5313, 2042, 0],
-] # yapf: disable
+ [
+ 0,
+ 10938,
+ 4542,
+ 2835,
+ 29441,
+ 2171,
+ 1611,
+ 9208,
+ 9528,
+ 11111,
+ 16120,
+ 22606,
+ 22127,
+ 20627,
+ 21246,
+ 23387,
+ 16697,
+ 33609,
+ 26184,
+ 24772,
+ 22644,
+ 20655,
+ 30492,
+ 23296,
+ 32979,
+ 18141,
+ 19248,
+ 17129,
+ 17192,
+ 15645,
+ 12658,
+ 11210,
+ 12094,
+ 13175,
+ 18162,
+ 4968,
+ 12308,
+ 10084,
+ 13026,
+ 15056,
+ ],
+ [
+ 10938,
+ 0,
+ 6422,
+ 9742,
+ 18988,
+ 12974,
+ 11216,
+ 19715,
+ 19004,
+ 18271,
+ 25070,
+ 31971,
+ 31632,
+ 30571,
+ 31578,
+ 33841,
+ 27315,
+ 43964,
+ 36944,
+ 35689,
+ 33569,
+ 31481,
+ 41360,
+ 33760,
+ 43631,
+ 28730,
+ 29976,
+ 27803,
+ 28076,
+ 26408,
+ 23504,
+ 22025,
+ 22000,
+ 13197,
+ 14936,
+ 15146,
+ 23246,
+ 20956,
+ 23963,
+ 25994,
+ ],
+ [
+ 4542,
+ 6422,
+ 0,
+ 3644,
+ 25173,
+ 6552,
+ 5092,
+ 13584,
+ 13372,
+ 13766,
+ 19805,
+ 26537,
+ 26117,
+ 24804,
+ 25590,
+ 27784,
+ 21148,
+ 37981,
+ 30693,
+ 29315,
+ 27148,
+ 25071,
+ 34943,
+ 27472,
+ 37281,
+ 22389,
+ 23592,
+ 21433,
+ 21655,
+ 20011,
+ 17087,
+ 15612,
+ 15872,
+ 11653,
+ 15666,
+ 8842,
+ 16843,
+ 14618,
+ 17563,
+ 19589,
+ ],
+ [
+ 2835,
+ 9742,
+ 3644,
+ 0,
+ 28681,
+ 3851,
+ 4341,
+ 11660,
+ 12294,
+ 13912,
+ 18893,
+ 25283,
+ 24777,
+ 23173,
+ 23636,
+ 25696,
+ 18950,
+ 35927,
+ 28233,
+ 26543,
+ 24127,
+ 21864,
+ 31765,
+ 24018,
+ 33904,
+ 19005,
+ 20295,
+ 18105,
+ 18551,
+ 16763,
+ 13958,
+ 12459,
+ 12296,
+ 10370,
+ 15331,
+ 5430,
+ 14044,
+ 12135,
+ 14771,
+ 16743,
+ ],
+ [
+ 29441,
+ 18988,
+ 25173,
+ 28681,
+ 0,
+ 31590,
+ 29265,
+ 37173,
+ 35501,
+ 32929,
+ 40239,
+ 47006,
+ 46892,
+ 46542,
+ 48112,
+ 50506,
+ 44539,
+ 60103,
+ 54208,
+ 53557,
+ 51878,
+ 50074,
+ 59849,
+ 52645,
+ 62415,
+ 47544,
+ 48689,
+ 46560,
+ 46567,
+ 45086,
+ 42083,
+ 40648,
+ 40971,
+ 29929,
+ 28493,
+ 34015,
+ 41473,
+ 38935,
+ 42160,
+ 44198,
+ ],
+ [
+ 2171,
+ 12974,
+ 6552,
+ 3851,
+ 31590,
+ 0,
+ 3046,
+ 7856,
+ 8864,
+ 11330,
+ 15411,
+ 21597,
+ 21065,
+ 19382,
+ 19791,
+ 21845,
+ 15099,
+ 32076,
+ 24425,
+ 22848,
+ 20600,
+ 18537,
+ 28396,
+ 21125,
+ 30825,
+ 15975,
+ 17101,
+ 14971,
+ 15104,
+ 13503,
+ 10544,
+ 9080,
+ 9983,
+ 13435,
+ 18755,
+ 2947,
+ 10344,
+ 8306,
+ 11069,
+ 13078,
+ ],
+ [
+ 1611,
+ 11216,
+ 5092,
+ 4341,
+ 29265,
+ 3046,
+ 0,
+ 8526,
+ 8368,
+ 9573,
+ 14904,
+ 21529,
+ 21085,
+ 19719,
+ 20504,
+ 22713,
+ 16118,
+ 32898,
+ 25728,
+ 24541,
+ 22631,
+ 20839,
+ 30584,
+ 23755,
+ 33278,
+ 18557,
+ 19545,
+ 17490,
+ 17309,
+ 15936,
+ 12881,
+ 11498,
+ 12944,
+ 14711,
+ 19589,
+ 5993,
+ 12227,
+ 9793,
+ 12925,
+ 14967,
+ ],
+ [
+ 9208,
+ 19715,
+ 13584,
+ 11660,
+ 37173,
+ 7856,
+ 8526,
+ 0,
+ 3248,
+ 7855,
+ 8245,
+ 13843,
+ 13272,
+ 11526,
+ 12038,
+ 14201,
+ 7599,
+ 24411,
+ 17259,
+ 16387,
+ 15050,
+ 13999,
+ 23134,
+ 17899,
+ 26460,
+ 12894,
+ 13251,
+ 11680,
+ 10455,
+ 9997,
+ 7194,
+ 6574,
+ 10678,
+ 20959,
+ 26458,
+ 8180,
+ 5255,
+ 2615,
+ 5730,
+ 7552,
+ ],
+ [
+ 9528,
+ 19004,
+ 13372,
+ 12294,
+ 35501,
+ 8864,
+ 8368,
+ 3248,
+ 0,
+ 4626,
+ 6598,
+ 13168,
+ 12746,
+ 11567,
+ 12731,
+ 15083,
+ 9120,
+ 25037,
+ 18718,
+ 18433,
+ 17590,
+ 16888,
+ 25630,
+ 20976,
+ 29208,
+ 16055,
+ 16300,
+ 14838,
+ 13422,
+ 13165,
+ 10430,
+ 9813,
+ 13777,
+ 22300,
+ 27564,
+ 10126,
+ 8388,
+ 5850,
+ 8778,
+ 10422,
+ ],
+ [
+ 11111,
+ 18271,
+ 13766,
+ 13912,
+ 32929,
+ 11330,
+ 9573,
+ 7855,
+ 4626,
+ 0,
+ 7318,
+ 14185,
+ 14005,
+ 13655,
+ 15438,
+ 17849,
+ 12839,
+ 27179,
+ 21947,
+ 22230,
+ 21814,
+ 21366,
+ 29754,
+ 25555,
+ 33535,
+ 20674,
+ 20872,
+ 19457,
+ 17961,
+ 17787,
+ 15048,
+ 14372,
+ 18115,
+ 24280,
+ 29101,
+ 13400,
+ 13008,
+ 10467,
+ 13375,
+ 14935,
+ ],
+ [
+ 16120,
+ 25070,
+ 19805,
+ 18893,
+ 40239,
+ 15411,
+ 14904,
+ 8245,
+ 6598,
+ 7318,
+ 0,
+ 6939,
+ 6702,
+ 6498,
+ 8610,
+ 10961,
+ 7744,
+ 19889,
+ 15350,
+ 16403,
+ 16975,
+ 17517,
+ 24357,
+ 22176,
+ 28627,
+ 18093,
+ 17672,
+ 16955,
+ 14735,
+ 15510,
+ 13694,
+ 13768,
+ 18317,
+ 28831,
+ 34148,
+ 16326,
+ 11276,
+ 9918,
+ 11235,
+ 11891,
+ ],
+ [
+ 22606,
+ 31971,
+ 26537,
+ 25283,
+ 47006,
+ 21597,
+ 21529,
+ 13843,
+ 13168,
+ 14185,
+ 6939,
+ 0,
+ 793,
+ 3401,
+ 5562,
+ 6839,
+ 8923,
+ 13433,
+ 11264,
+ 13775,
+ 15853,
+ 17629,
+ 21684,
+ 22315,
+ 26411,
+ 19539,
+ 18517,
+ 18636,
+ 16024,
+ 17632,
+ 16948,
+ 17587,
+ 22131,
+ 34799,
+ 40296,
+ 21953,
+ 14739,
+ 14568,
+ 14366,
+ 14002,
+ ],
+ [
+ 22127,
+ 31632,
+ 26117,
+ 24777,
+ 46892,
+ 21065,
+ 21085,
+ 13272,
+ 12746,
+ 14005,
+ 6702,
+ 793,
+ 0,
+ 2608,
+ 4809,
+ 6215,
+ 8151,
+ 13376,
+ 10702,
+ 13094,
+ 15099,
+ 16845,
+ 21039,
+ 21535,
+ 25744,
+ 18746,
+ 17725,
+ 17845,
+ 15232,
+ 16848,
+ 16197,
+ 16859,
+ 21391,
+ 34211,
+ 39731,
+ 21345,
+ 14006,
+ 13907,
+ 13621,
+ 13225,
+ ],
+ [
+ 20627,
+ 30571,
+ 24804,
+ 23173,
+ 46542,
+ 19382,
+ 19719,
+ 11526,
+ 11567,
+ 13655,
+ 6498,
+ 3401,
+ 2608,
+ 0,
+ 2556,
+ 4611,
+ 5630,
+ 13586,
+ 9157,
+ 11005,
+ 12681,
+ 14285,
+ 19044,
+ 18996,
+ 23644,
+ 16138,
+ 15126,
+ 15240,
+ 12625,
+ 14264,
+ 13736,
+ 14482,
+ 18958,
+ 32292,
+ 37879,
+ 19391,
+ 11621,
+ 11803,
+ 11188,
+ 10671,
+ ],
+ [
+ 21246,
+ 31578,
+ 25590,
+ 23636,
+ 48112,
+ 19791,
+ 20504,
+ 12038,
+ 12731,
+ 15438,
+ 8610,
+ 5562,
+ 4809,
+ 2556,
+ 0,
+ 2411,
+ 4917,
+ 12395,
+ 6757,
+ 8451,
+ 10292,
+ 12158,
+ 16488,
+ 16799,
+ 21097,
+ 14374,
+ 13194,
+ 13590,
+ 10943,
+ 12824,
+ 12815,
+ 13779,
+ 18042,
+ 32259,
+ 37918,
+ 19416,
+ 10975,
+ 11750,
+ 10424,
+ 9475,
+ ],
+ [
+ 23387,
+ 33841,
+ 27784,
+ 25696,
+ 50506,
+ 21845,
+ 22713,
+ 14201,
+ 15083,
+ 17849,
+ 10961,
+ 6839,
+ 6215,
+ 4611,
+ 2411,
+ 0,
+ 6760,
+ 10232,
+ 4567,
+ 7010,
+ 9607,
+ 12003,
+ 14846,
+ 16408,
+ 19592,
+ 14727,
+ 13336,
+ 14109,
+ 11507,
+ 13611,
+ 14104,
+ 15222,
+ 19237,
+ 34013,
+ 39703,
+ 21271,
+ 12528,
+ 13657,
+ 11907,
+ 10633,
+ ],
+ [
+ 16697,
+ 27315,
+ 21148,
+ 18950,
+ 44539,
+ 15099,
+ 16118,
+ 7599,
+ 9120,
+ 12839,
+ 7744,
+ 8923,
+ 8151,
+ 5630,
+ 4917,
+ 6760,
+ 0,
+ 16982,
+ 9699,
+ 9400,
+ 9302,
+ 9823,
+ 16998,
+ 14534,
+ 21042,
+ 10911,
+ 10190,
+ 9900,
+ 7397,
+ 8758,
+ 8119,
+ 8948,
+ 13353,
+ 27354,
+ 33023,
+ 14542,
+ 6106,
+ 6901,
+ 5609,
+ 5084,
+ ],
+ [
+ 33609,
+ 43964,
+ 37981,
+ 35927,
+ 60103,
+ 32076,
+ 32898,
+ 24411,
+ 25037,
+ 27179,
+ 19889,
+ 13433,
+ 13376,
+ 13586,
+ 12395,
+ 10232,
+ 16982,
+ 0,
+ 8843,
+ 12398,
+ 16193,
+ 19383,
+ 16423,
+ 22583,
+ 20997,
+ 22888,
+ 21194,
+ 22640,
+ 20334,
+ 22636,
+ 23801,
+ 25065,
+ 28675,
+ 44048,
+ 49756,
+ 31426,
+ 22528,
+ 23862,
+ 21861,
+ 20315,
+ ],
+ [
+ 26184,
+ 36944,
+ 30693,
+ 28233,
+ 54208,
+ 24425,
+ 25728,
+ 17259,
+ 18718,
+ 21947,
+ 15350,
+ 11264,
+ 10702,
+ 9157,
+ 6757,
+ 4567,
+ 9699,
+ 8843,
+ 0,
+ 3842,
+ 7518,
+ 10616,
+ 10666,
+ 14237,
+ 15515,
+ 14053,
+ 12378,
+ 13798,
+ 11537,
+ 13852,
+ 15276,
+ 16632,
+ 19957,
+ 35660,
+ 41373,
+ 23361,
+ 14333,
+ 16125,
+ 13624,
+ 11866,
+ ],
+ [
+ 24772,
+ 35689,
+ 29315,
+ 26543,
+ 53557,
+ 22848,
+ 24541,
+ 16387,
+ 18433,
+ 22230,
+ 16403,
+ 13775,
+ 13094,
+ 11005,
+ 8451,
+ 7010,
+ 9400,
+ 12398,
+ 3842,
+ 0,
+ 3795,
+ 7014,
+ 8053,
+ 10398,
+ 12657,
+ 10633,
+ 8889,
+ 10569,
+ 8646,
+ 10938,
+ 12906,
+ 14366,
+ 17106,
+ 33171,
+ 38858,
+ 21390,
+ 12507,
+ 14748,
+ 11781,
+ 9802,
+ ],
+ [
+ 22644,
+ 33569,
+ 27148,
+ 24127,
+ 51878,
+ 20600,
+ 22631,
+ 15050,
+ 17590,
+ 21814,
+ 16975,
+ 15853,
+ 15099,
+ 12681,
+ 10292,
+ 9607,
+ 9302,
+ 16193,
+ 7518,
+ 3795,
+ 0,
+ 3250,
+ 8084,
+ 6873,
+ 11763,
+ 6949,
+ 5177,
+ 7050,
+ 5619,
+ 7730,
+ 10187,
+ 11689,
+ 13792,
+ 30012,
+ 35654,
+ 18799,
+ 10406,
+ 12981,
+ 9718,
+ 7682,
+ ],
+ [
+ 20655,
+ 31481,
+ 25071,
+ 21864,
+ 50074,
+ 18537,
+ 20839,
+ 13999,
+ 16888,
+ 21366,
+ 17517,
+ 17629,
+ 16845,
+ 14285,
+ 12158,
+ 12003,
+ 9823,
+ 19383,
+ 10616,
+ 7014,
+ 3250,
+ 0,
+ 9901,
+ 4746,
+ 12531,
+ 3737,
+ 1961,
+ 4036,
+ 3588,
+ 5109,
+ 7996,
+ 9459,
+ 10846,
+ 27094,
+ 32690,
+ 16451,
+ 8887,
+ 11624,
+ 8304,
+ 6471,
+ ],
+ [
+ 30492,
+ 41360,
+ 34943,
+ 31765,
+ 59849,
+ 28396,
+ 30584,
+ 23134,
+ 25630,
+ 29754,
+ 24357,
+ 21684,
+ 21039,
+ 19044,
+ 16488,
+ 14846,
+ 16998,
+ 16423,
+ 10666,
+ 8053,
+ 8084,
+ 9901,
+ 0,
+ 9363,
+ 4870,
+ 13117,
+ 11575,
+ 13793,
+ 13300,
+ 15009,
+ 17856,
+ 19337,
+ 20454,
+ 36551,
+ 42017,
+ 26352,
+ 18403,
+ 21033,
+ 17737,
+ 15720,
+ ],
+ [
+ 23296,
+ 33760,
+ 27472,
+ 24018,
+ 52645,
+ 21125,
+ 23755,
+ 17899,
+ 20976,
+ 25555,
+ 22176,
+ 22315,
+ 21535,
+ 18996,
+ 16799,
+ 16408,
+ 14534,
+ 22583,
+ 14237,
+ 10398,
+ 6873,
+ 4746,
+ 9363,
+ 0,
+ 10020,
+ 5211,
+ 4685,
+ 6348,
+ 7636,
+ 8010,
+ 11074,
+ 12315,
+ 11926,
+ 27537,
+ 32880,
+ 18634,
+ 12644,
+ 15358,
+ 12200,
+ 10674,
+ ],
+ [
+ 32979,
+ 43631,
+ 37281,
+ 33904,
+ 62415,
+ 30825,
+ 33278,
+ 26460,
+ 29208,
+ 33535,
+ 28627,
+ 26411,
+ 25744,
+ 23644,
+ 21097,
+ 19592,
+ 21042,
+ 20997,
+ 15515,
+ 12657,
+ 11763,
+ 12531,
+ 4870,
+ 10020,
+ 0,
+ 14901,
+ 13738,
+ 15855,
+ 16118,
+ 17348,
+ 20397,
+ 21793,
+ 21936,
+ 37429,
+ 42654,
+ 28485,
+ 21414,
+ 24144,
+ 20816,
+ 18908,
+ ],
+ [
+ 18141,
+ 28730,
+ 22389,
+ 19005,
+ 47544,
+ 15975,
+ 18557,
+ 12894,
+ 16055,
+ 20674,
+ 18093,
+ 19539,
+ 18746,
+ 16138,
+ 14374,
+ 14727,
+ 10911,
+ 22888,
+ 14053,
+ 10633,
+ 6949,
+ 3737,
+ 13117,
+ 5211,
+ 14901,
+ 0,
+ 1777,
+ 1217,
+ 3528,
+ 2896,
+ 5892,
+ 7104,
+ 7338,
+ 23517,
+ 29068,
+ 13583,
+ 7667,
+ 10304,
+ 7330,
+ 6204,
+ ],
+ [
+ 19248,
+ 29976,
+ 23592,
+ 20295,
+ 48689,
+ 17101,
+ 19545,
+ 13251,
+ 16300,
+ 20872,
+ 17672,
+ 18517,
+ 17725,
+ 15126,
+ 13194,
+ 13336,
+ 10190,
+ 21194,
+ 12378,
+ 8889,
+ 5177,
+ 1961,
+ 11575,
+ 4685,
+ 13738,
+ 1777,
+ 0,
+ 2217,
+ 2976,
+ 3610,
+ 6675,
+ 8055,
+ 8965,
+ 25197,
+ 30774,
+ 14865,
+ 8007,
+ 10742,
+ 7532,
+ 6000,
+ ],
+ [
+ 17129,
+ 27803,
+ 21433,
+ 18105,
+ 46560,
+ 14971,
+ 17490,
+ 11680,
+ 14838,
+ 19457,
+ 16955,
+ 18636,
+ 17845,
+ 15240,
+ 13590,
+ 14109,
+ 9900,
+ 22640,
+ 13798,
+ 10569,
+ 7050,
+ 4036,
+ 13793,
+ 6348,
+ 15855,
+ 1217,
+ 2217,
+ 0,
+ 2647,
+ 1686,
+ 4726,
+ 6000,
+ 6810,
+ 23060,
+ 28665,
+ 12674,
+ 6450,
+ 9094,
+ 6117,
+ 5066,
+ ],
+ [
+ 17192,
+ 28076,
+ 21655,
+ 18551,
+ 46567,
+ 15104,
+ 17309,
+ 10455,
+ 13422,
+ 17961,
+ 14735,
+ 16024,
+ 15232,
+ 12625,
+ 10943,
+ 11507,
+ 7397,
+ 20334,
+ 11537,
+ 8646,
+ 5619,
+ 3588,
+ 13300,
+ 7636,
+ 16118,
+ 3528,
+ 2976,
+ 2647,
+ 0,
+ 2320,
+ 4593,
+ 6093,
+ 8479,
+ 24542,
+ 30219,
+ 13194,
+ 5301,
+ 8042,
+ 4735,
+ 3039,
+ ],
+ [
+ 15645,
+ 26408,
+ 20011,
+ 16763,
+ 45086,
+ 13503,
+ 15936,
+ 9997,
+ 13165,
+ 17787,
+ 15510,
+ 17632,
+ 16848,
+ 14264,
+ 12824,
+ 13611,
+ 8758,
+ 22636,
+ 13852,
+ 10938,
+ 7730,
+ 5109,
+ 15009,
+ 8010,
+ 17348,
+ 2896,
+ 3610,
+ 1686,
+ 2320,
+ 0,
+ 3086,
+ 4444,
+ 6169,
+ 22301,
+ 27963,
+ 11344,
+ 4780,
+ 7408,
+ 4488,
+ 3721,
+ ],
+ [
+ 12658,
+ 23504,
+ 17087,
+ 13958,
+ 42083,
+ 10544,
+ 12881,
+ 7194,
+ 10430,
+ 15048,
+ 13694,
+ 16948,
+ 16197,
+ 13736,
+ 12815,
+ 14104,
+ 8119,
+ 23801,
+ 15276,
+ 12906,
+ 10187,
+ 7996,
+ 17856,
+ 11074,
+ 20397,
+ 5892,
+ 6675,
+ 4726,
+ 4593,
+ 3086,
+ 0,
+ 1501,
+ 5239,
+ 20390,
+ 26101,
+ 8611,
+ 2418,
+ 4580,
+ 2599,
+ 3496,
+ ],
+ [
+ 11210,
+ 22025,
+ 15612,
+ 12459,
+ 40648,
+ 9080,
+ 11498,
+ 6574,
+ 9813,
+ 14372,
+ 13768,
+ 17587,
+ 16859,
+ 14482,
+ 13779,
+ 15222,
+ 8948,
+ 25065,
+ 16632,
+ 14366,
+ 11689,
+ 9459,
+ 19337,
+ 12315,
+ 21793,
+ 7104,
+ 8055,
+ 6000,
+ 6093,
+ 4444,
+ 1501,
+ 0,
+ 4608,
+ 19032,
+ 24747,
+ 7110,
+ 2860,
+ 4072,
+ 3355,
+ 4772,
+ ],
+ [
+ 12094,
+ 22000,
+ 15872,
+ 12296,
+ 40971,
+ 9983,
+ 12944,
+ 10678,
+ 13777,
+ 18115,
+ 18317,
+ 22131,
+ 21391,
+ 18958,
+ 18042,
+ 19237,
+ 13353,
+ 28675,
+ 19957,
+ 17106,
+ 13792,
+ 10846,
+ 20454,
+ 11926,
+ 21936,
+ 7338,
+ 8965,
+ 6810,
+ 8479,
+ 6169,
+ 5239,
+ 4608,
+ 0,
+ 16249,
+ 21866,
+ 7146,
+ 7403,
+ 8446,
+ 7773,
+ 8614,
+ ],
+ [
+ 13175,
+ 13197,
+ 11653,
+ 10370,
+ 29929,
+ 13435,
+ 14711,
+ 20959,
+ 22300,
+ 24280,
+ 28831,
+ 34799,
+ 34211,
+ 32292,
+ 32259,
+ 34013,
+ 27354,
+ 44048,
+ 35660,
+ 33171,
+ 30012,
+ 27094,
+ 36551,
+ 27537,
+ 37429,
+ 23517,
+ 25197,
+ 23060,
+ 24542,
+ 22301,
+ 20390,
+ 19032,
+ 16249,
+ 0,
+ 5714,
+ 12901,
+ 21524,
+ 20543,
+ 22186,
+ 23805,
+ ],
+ [
+ 18162,
+ 14936,
+ 15666,
+ 15331,
+ 28493,
+ 18755,
+ 19589,
+ 26458,
+ 27564,
+ 29101,
+ 34148,
+ 40296,
+ 39731,
+ 37879,
+ 37918,
+ 39703,
+ 33023,
+ 49756,
+ 41373,
+ 38858,
+ 35654,
+ 32690,
+ 42017,
+ 32880,
+ 42654,
+ 29068,
+ 30774,
+ 28665,
+ 30219,
+ 27963,
+ 26101,
+ 24747,
+ 21866,
+ 5714,
+ 0,
+ 18516,
+ 27229,
+ 26181,
+ 27895,
+ 29519,
+ ],
+ [
+ 4968,
+ 15146,
+ 8842,
+ 5430,
+ 34015,
+ 2947,
+ 5993,
+ 8180,
+ 10126,
+ 13400,
+ 16326,
+ 21953,
+ 21345,
+ 19391,
+ 19416,
+ 21271,
+ 14542,
+ 31426,
+ 23361,
+ 21390,
+ 18799,
+ 16451,
+ 26352,
+ 18634,
+ 28485,
+ 13583,
+ 14865,
+ 12674,
+ 13194,
+ 11344,
+ 8611,
+ 7110,
+ 7146,
+ 12901,
+ 18516,
+ 0,
+ 9029,
+ 7668,
+ 9742,
+ 11614,
+ ],
+ [
+ 12308,
+ 23246,
+ 16843,
+ 14044,
+ 41473,
+ 10344,
+ 12227,
+ 5255,
+ 8388,
+ 13008,
+ 11276,
+ 14739,
+ 14006,
+ 11621,
+ 10975,
+ 12528,
+ 6106,
+ 22528,
+ 14333,
+ 12507,
+ 10406,
+ 8887,
+ 18403,
+ 12644,
+ 21414,
+ 7667,
+ 8007,
+ 6450,
+ 5301,
+ 4780,
+ 2418,
+ 2860,
+ 7403,
+ 21524,
+ 27229,
+ 9029,
+ 0,
+ 2747,
+ 726,
+ 2749,
+ ],
+ [
+ 10084,
+ 20956,
+ 14618,
+ 12135,
+ 38935,
+ 8306,
+ 9793,
+ 2615,
+ 5850,
+ 10467,
+ 9918,
+ 14568,
+ 13907,
+ 11803,
+ 11750,
+ 13657,
+ 6901,
+ 23862,
+ 16125,
+ 14748,
+ 12981,
+ 11624,
+ 21033,
+ 15358,
+ 24144,
+ 10304,
+ 10742,
+ 9094,
+ 8042,
+ 7408,
+ 4580,
+ 4072,
+ 8446,
+ 20543,
+ 26181,
+ 7668,
+ 2747,
+ 0,
+ 3330,
+ 5313,
+ ],
+ [
+ 13026,
+ 23963,
+ 17563,
+ 14771,
+ 42160,
+ 11069,
+ 12925,
+ 5730,
+ 8778,
+ 13375,
+ 11235,
+ 14366,
+ 13621,
+ 11188,
+ 10424,
+ 11907,
+ 5609,
+ 21861,
+ 13624,
+ 11781,
+ 9718,
+ 8304,
+ 17737,
+ 12200,
+ 20816,
+ 7330,
+ 7532,
+ 6117,
+ 4735,
+ 4488,
+ 2599,
+ 3355,
+ 7773,
+ 22186,
+ 27895,
+ 9742,
+ 726,
+ 3330,
+ 0,
+ 2042,
+ ],
+ [
+ 15056,
+ 25994,
+ 19589,
+ 16743,
+ 44198,
+ 13078,
+ 14967,
+ 7552,
+ 10422,
+ 14935,
+ 11891,
+ 14002,
+ 13225,
+ 10671,
+ 9475,
+ 10633,
+ 5084,
+ 20315,
+ 11866,
+ 9802,
+ 7682,
+ 6471,
+ 15720,
+ 10674,
+ 18908,
+ 6204,
+ 6000,
+ 5066,
+ 3039,
+ 3721,
+ 3496,
+ 4772,
+ 8614,
+ 23805,
+ 29519,
+ 11614,
+ 2749,
+ 5313,
+ 2042,
+ 0,
+ ],
+] # yapf: disable
MAX_DISTANCE = 80_000
@@ -67,105 +1707,105 @@
# Create a console solution printer.
def print_solution(manager, routing, assignment):
- """Prints assignment on console."""
- print(f'Objective: {assignment.ObjectiveValue()}')
- # Display dropped nodes.
- dropped_nodes = 'Dropped nodes:'
- for index in range(routing.Size()):
- if routing.IsStart(index) or routing.IsEnd(index):
- continue
- if assignment.Value(routing.NextVar(index)) == index:
- node = manager.IndexToNode(index)
- dropped_nodes += f' {node}({VISIT_VALUES[node]})'
- print(dropped_nodes)
- # Display routes
- total_distance = 0
- total_value_collected = 0
- for v in range(manager.GetNumberOfVehicles()):
- index = routing.Start(v)
- plan_output = f'Route for vehicle {v}:\n'
- route_distance = 0
- value_collected = 0
- while not routing.IsEnd(index):
- node = manager.IndexToNode(index)
- value_collected += VISIT_VALUES[node]
- plan_output += f' {node} ->'
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, v)
- plan_output += f' {manager.IndexToNode(index)}\n'
- plan_output += f'Distance of the route: {route_distance}m\n'
- plan_output += f'Value collected: {value_collected}\n'
- print(plan_output)
- total_distance += route_distance
- total_value_collected += value_collected
- print(f'Total Distance: {total_distance}m')
- print(f'Total Value collected: {total_value_collected}/{sum(VISIT_VALUES)}')
+ """Prints assignment on console."""
+ print(f'Objective: {assignment.ObjectiveValue()}')
+ # Display dropped nodes.
+ dropped_nodes = 'Dropped nodes:'
+ for index in range(routing.Size()):
+ if routing.IsStart(index) or routing.IsEnd(index):
+ continue
+ if assignment.Value(routing.NextVar(index)) == index:
+ node = manager.IndexToNode(index)
+ dropped_nodes += f' {node}({VISIT_VALUES[node]})'
+ print(dropped_nodes)
+ # Display routes
+ total_distance = 0
+ total_value_collected = 0
+ for v in range(manager.GetNumberOfVehicles()):
+ if not routing.IsVehicleUsed(assignment, v):
+ continue
+ index = routing.Start(v)
+ plan_output = f'Route for vehicle {v}:\n'
+ route_distance = 0
+ value_collected = 0
+ while not routing.IsEnd(index):
+ node = manager.IndexToNode(index)
+ value_collected += VISIT_VALUES[node]
+ plan_output += f' {node} ->'
+ previous_index = index
+ index = assignment.Value(routing.NextVar(index))
+ route_distance += routing.GetArcCostForVehicle(previous_index, index, v)
+ plan_output += f' {manager.IndexToNode(index)}\n'
+ plan_output += f'Distance of the route: {route_distance}m\n'
+ plan_output += f'Value collected: {value_collected}\n'
+ print(plan_output)
+ total_distance += route_distance
+ total_value_collected += value_collected
+ print(f'Total Distance: {total_distance}m')
+ print(f'Total Value collected: {total_value_collected}/{sum(VISIT_VALUES)}')
def main():
- """Entry point of the program."""
- num_nodes = len(DISTANCE_MATRIX)
- print(f'Num nodes = {num_nodes}')
- num_vehicles = 4
- depot = 0
- all_nodes = range(num_nodes)
-
- # Create the routing index manager.
- manager = pywrapcp.RoutingIndexManager(
- num_nodes,
- num_vehicles,
- depot)
-
- # Create routing model.
- routing = pywrapcp.RoutingModel(manager)
-
- # Create and register a transit callback.
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return DISTANCE_MATRIX[from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
-
- # Define cost of each arc.
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
-
- # Limit Vehicle distance.
- dimension_name = 'Distance'
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- MAX_DISTANCE, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name)
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(1)
-
- # Allow to drop nodes.
- for node in range(1, num_nodes):
- routing.AddDisjunction(
- [manager.NodeToIndex(node)],
- VISIT_VALUES[node])
-
- # Setting first solution heuristic.
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
- search_parameters.time_limit.FromSeconds(15)
- #search_parameters.log_search = True
-
- # Solve the problem.
- assignment = routing.SolveWithParameters(search_parameters)
-
- # Print solution on console.
- if assignment:
- print_solution(manager, routing, assignment)
+ """Entry point of the program."""
+ num_nodes = len(DISTANCE_MATRIX)
+ print(f'Num nodes = {num_nodes}')
+ num_vehicles = 4
+ depot = 0
+ all_nodes = range(num_nodes)
+
+ # Create the routing index manager.
+ manager = pywraprouting.RoutingIndexManager(num_nodes, num_vehicles, depot)
+
+ # Create routing model.
+ routing = pywraprouting.RoutingModel(manager)
+
+ # Create and register a transit callback.
+ def distance_callback(from_index, to_index):
+ """Returns the distance between the two nodes."""
+ # Convert from routing variable Index to distance matrix NodeIndex.
+ from_node = manager.IndexToNode(from_index)
+ to_node = manager.IndexToNode(to_index)
+ return DISTANCE_MATRIX[from_node][to_node]
+
+ transit_callback_index = routing.RegisterTransitCallback(distance_callback)
+
+ # Define cost of each arc.
+ routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
+
+  # Limit vehicle distance.
+ dimension_name = 'Distance'
+ routing.AddDimension(
+ transit_callback_index,
+ 0, # no slack
+ MAX_DISTANCE, # vehicle maximum travel distance
+ True, # start cumul to zero
+ dimension_name,
+ )
+ distance_dimension = routing.GetDimensionOrDie(dimension_name)
+ distance_dimension.SetGlobalSpanCostCoefficient(1)
+
+  # Allow nodes to be dropped.
+ for node in range(1, num_nodes):
+ routing.AddDisjunction([manager.NodeToIndex(node)], VISIT_VALUES[node])
+
+ # Setting first solution heuristic.
+ search_parameters = pywraprouting.DefaultRoutingSearchParameters()
+ search_parameters.first_solution_strategy = (
+ enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
+ )
+ search_parameters.local_search_metaheuristic = (
+ enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
+ )
+ search_parameters.time_limit.FromSeconds(15)
+ # search_parameters.log_search = True
+
+ # Solve the problem.
+ assignment = routing.SolveWithParameters(search_parameters)
+
+ # Print solution on console.
+ if assignment:
+ print_solution(manager, routing, assignment)
if __name__ == '__main__':
- main()
+ main()
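Note (reviewer sketch): a minimal, self-contained illustration of the migrated Python routing API used by the example above (ortools.routing in place of ortools.constraint_solver). The 4x4 distance matrix and single-vehicle setup below are illustrative only and are not part of this patch.

from ortools.routing import enums_pb2
from ortools.routing import pywraprouting

DIST = [
    [0, 2, 9, 10],
    [1, 0, 6, 4],
    [15, 7, 0, 8],
    [6, 3, 12, 0],
]

# One vehicle, depot at node 0.
manager = pywraprouting.RoutingIndexManager(len(DIST), 1, 0)
routing = pywraprouting.RoutingModel(manager)


def distance_callback(from_index, to_index):
    """Returns the distance between two routing indices."""
    return DIST[manager.IndexToNode(from_index)][manager.IndexToNode(to_index)]


transit_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_index)

search_parameters = pywraprouting.DefaultRoutingSearchParameters()
search_parameters.first_solution_strategy = (
    enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
)

assignment = routing.SolveWithParameters(search_parameters)
if assignment:
    index = routing.Start(0)
    route = []
    while not routing.IsEnd(index):
        route.append(manager.IndexToNode(index))
        index = assignment.Value(routing.NextVar(index))
    route.append(manager.IndexToNode(index))
    print('Route:', route, 'Cost:', assignment.ObjectiveValue())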
diff --git a/examples/python/prize_collecting_vrp_sat.py b/examples/python/prize_collecting_vrp_sat.py
index 210c5aeb652..373030df357 100644
--- a/examples/python/prize_collecting_vrp_sat.py
+++ b/examples/python/prize_collecting_vrp_sat.py
@@ -78,127 +78,128 @@ def print_solution(
num_nodes: int,
num_vehicles: int,
) -> None:
- """Prints solution on console."""
- # Display dropped nodes.
- dropped_nodes = "Dropped nodes:"
- for node in range(num_nodes):
- if node == 0:
- continue
- is_visited = sum(
- [solver.boolean_value(visited_nodes[v][node]) for v in range(num_vehicles)]
- )
- if not is_visited:
- dropped_nodes += f" {node}({VISIT_VALUES[node]})"
- print(dropped_nodes)
- # Display routes
- total_distance = 0
- total_value_collected = 0
- for v in range(num_vehicles):
- current_node = 0
- plan_output = f"Route for vehicle {v}:\n"
- route_distance = 0
- value_collected = 0
- route_is_finished = False
- while not route_is_finished:
- value_collected += VISIT_VALUES[current_node]
- plan_output += f" {current_node} ->"
- # find next node
- for node in range(num_nodes):
- if node == current_node:
- continue
- if solver.boolean_value(used_arcs[v][current_node, node]):
- route_distance += DISTANCE_MATRIX[current_node][node]
- current_node = node
- if current_node == 0:
- route_is_finished = True
- break
- plan_output += f" {current_node}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- plan_output += f"value collected: {value_collected}\n"
- print(plan_output)
- total_distance += route_distance
- total_value_collected += value_collected
- print(f"Total Distance: {total_distance}m")
- print(f"Total value collected: {total_value_collected}/{sum(VISIT_VALUES)}")
+ """Prints solution on console."""
+ # Display dropped nodes.
+ dropped_nodes = "Dropped nodes:"
+ for node in range(num_nodes):
+ if node == 0:
+ continue
+ is_visited = sum([
+ solver.boolean_value(visited_nodes[v][node])
+ for v in range(num_vehicles)
+ ])
+ if not is_visited:
+ dropped_nodes += f" {node}({VISIT_VALUES[node]})"
+ print(dropped_nodes)
+ # Display routes
+ total_distance = 0
+ total_value_collected = 0
+ for v in range(num_vehicles):
+ current_node = 0
+ plan_output = f"Route for vehicle {v}:\n"
+ route_distance = 0
+ value_collected = 0
+ route_is_finished = False
+ while not route_is_finished:
+ value_collected += VISIT_VALUES[current_node]
+ plan_output += f" {current_node} ->"
+ # find next node
+ for node in range(num_nodes):
+ if node == current_node:
+ continue
+ if solver.boolean_value(used_arcs[v][current_node, node]):
+ route_distance += DISTANCE_MATRIX[current_node][node]
+ current_node = node
+ if current_node == 0:
+ route_is_finished = True
+ break
+ plan_output += f" {current_node}\n"
+ plan_output += f"Distance of the route: {route_distance}m\n"
+ plan_output += f"value collected: {value_collected}\n"
+ print(plan_output)
+ total_distance += route_distance
+ total_value_collected += value_collected
+ print(f"Total Distance: {total_distance}m")
+ print(f"Total value collected: {total_value_collected}/{sum(VISIT_VALUES)}")
def prize_collecting_vrp():
- """Entry point of the program."""
- num_nodes = len(DISTANCE_MATRIX)
- num_vehicles = 4
- print(f"Num nodes = {num_nodes}")
-
- # Model.
- model = cp_model.CpModel()
-
- obj_vars = []
- obj_coeffs = []
- visited_nodes = {}
- used_arcs = {}
-
- # Create the circuit constraint.
- all_nodes = range(num_nodes)
- for v in range(num_vehicles):
- visited_nodes[v] = []
- used_arcs[v] = {}
- arcs = []
- for i in all_nodes:
- is_visited = model.new_bool_var(f"{i} is visited")
- arcs.append((i, i, ~is_visited))
-
- obj_vars.append(is_visited)
- obj_coeffs.append(VISIT_VALUES[i])
- visited_nodes[v].append(is_visited)
-
- for j in all_nodes:
- if i == j:
- used_arcs[v][i, j] = ~is_visited
- continue
- arc_is_used = model.new_bool_var(f"{j} follows {i}")
- arcs.append((i, j, arc_is_used))
-
- obj_vars.append(arc_is_used)
- obj_coeffs.append(-DISTANCE_MATRIX[i][j])
- used_arcs[v][i, j] = arc_is_used
-
- model.add_circuit(arcs)
-
- # Node 0 must be visited.
- model.add(visited_nodes[v][0] == 1)
-
- # limit the route distance
- model.add(
- sum(
- used_arcs[v][i, j] * DISTANCE_MATRIX[i][j]
- for i in all_nodes
- for j in all_nodes
- )
- <= MAX_DISTANCE
+ """Entry point of the program."""
+ num_nodes = len(DISTANCE_MATRIX)
+ num_vehicles = 4
+ print(f"Num nodes = {num_nodes}")
+
+ # Model.
+ model = cp_model.CpModel()
+
+ obj_vars = []
+ obj_coeffs = []
+ visited_nodes = {}
+ used_arcs = {}
+
+ # Create the circuit constraint.
+ all_nodes = range(num_nodes)
+ for v in range(num_vehicles):
+ visited_nodes[v] = []
+ used_arcs[v] = {}
+ arcs = []
+ for i in all_nodes:
+ is_visited = model.new_bool_var(f"{i} is visited")
+ arcs.append((i, i, ~is_visited))
+
+ obj_vars.append(is_visited)
+ obj_coeffs.append(VISIT_VALUES[i])
+ visited_nodes[v].append(is_visited)
+
+ for j in all_nodes:
+ if i == j:
+ used_arcs[v][i, j] = ~is_visited
+ continue
+ arc_is_used = model.new_bool_var(f"{j} follows {i}")
+ arcs.append((i, j, arc_is_used))
+
+ obj_vars.append(arc_is_used)
+ obj_coeffs.append(-DISTANCE_MATRIX[i][j])
+ used_arcs[v][i, j] = arc_is_used
+
+ model.add_circuit(arcs)
+
+ # Node 0 must be visited.
+ model.add(visited_nodes[v][0] == 1)
+
+ # limit the route distance
+ model.add(
+ sum(
+ used_arcs[v][i, j] * DISTANCE_MATRIX[i][j]
+ for i in all_nodes
+ for j in all_nodes
)
+ <= MAX_DISTANCE
+ )
- # Each node is visited at most once
- for node in range(1, num_nodes):
- model.add_at_most_one([visited_nodes[v][node] for v in range(num_vehicles)])
+ # Each node is visited at most once
+ for node in range(1, num_nodes):
+ model.add_at_most_one([visited_nodes[v][node] for v in range(num_vehicles)])
- # Maximize visited node values minus the travelled distance.
- model.maximize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
+ # Maximize visited node values minus the travelled distance.
+ model.maximize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- solver.parameters.num_search_workers = 8
- solver.parameters.max_time_in_seconds = 15.0
- solver.parameters.log_search_progress = True
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ solver.parameters.num_search_workers = 8
+ solver.parameters.max_time_in_seconds = 15.0
+ solver.parameters.log_search_progress = True
- status = solver.solve(model)
- if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
- print_solution(solver, visited_nodes, used_arcs, num_nodes, num_vehicles)
+ status = solver.solve(model)
+ if status == cp_model.FEASIBLE or status == cp_model.OPTIMAL:
+ print_solution(solver, visited_nodes, used_arcs, num_nodes, num_vehicles)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- prize_collecting_vrp()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ prize_collecting_vrp()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
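Note (reviewer sketch): prize_collecting_vrp_sat.py builds one add_circuit constraint per vehicle, with a true self-loop literal marking a skipped node. The single-vehicle snippet below sketches that pattern on a made-up 4-node instance; distances and visit values are illustrative only.

from ortools.sat.python import cp_model

DIST = [[0, 2, 9, 10], [1, 0, 6, 4], [15, 7, 0, 8], [6, 3, 12, 0]]
VALUE = [0, 10, 40, 30]
num_nodes = len(DIST)

model = cp_model.CpModel()
visit = [model.new_bool_var(f"visit_{i}") for i in range(num_nodes)]
arcs = []
objective_terms = []
for i in range(num_nodes):
    # A true self-loop literal tells add_circuit that node i is skipped.
    arcs.append((i, i, ~visit[i]))
    objective_terms.append(VALUE[i] * visit[i])
    for j in range(num_nodes):
        if i == j:
            continue
        arc = model.new_bool_var(f"arc_{i}_{j}")
        arcs.append((i, j, arc))
        objective_terms.append(-DIST[i][j] * arc)
model.add_circuit(arcs)
model.add(visit[0] == 1)  # The depot must be on the route.
model.maximize(sum(objective_terms))

solver = cp_model.CpSolver()
status = solver.solve(model)
if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
    visited = [i for i in range(num_nodes) if solver.boolean_value(visit[i])]
    print("Visited nodes:", visited, "Objective:", solver.objective_value)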
diff --git a/examples/python/proto_solve.py b/examples/python/proto_solve.py
index 523d2f2c202..491774ea4c4 100644
--- a/examples/python/proto_solve.py
+++ b/examples/python/proto_solve.py
@@ -1,4 +1,3 @@
-
from absl import app
from absl import flags
from ortools.linear_solver.python import model_builder
@@ -6,34 +5,38 @@
FLAGS = flags.FLAGS
_INPUT = flags.DEFINE_string('input', '', 'Input file to load and solve.')
-_PARAMS = flags.DEFINE_string('params', '', 'Solver parameters in string format.')
-_SOLVER = flags.DEFINE_string('solver', 'sat', 'Solver type to solve the model with.')
+_PARAMS = flags.DEFINE_string(
+ 'params', '', 'Solver parameters in string format.'
+)
+_SOLVER = flags.DEFINE_string(
+ 'solver', 'sat', 'Solver type to solve the model with.'
+)
def main(_):
- model = model_builder.ModelBuilder()
+ model = model_builder.ModelBuilder()
- # Load MPS file.
- if not model.import_from_mps_file(_INPUT.value):
- print(f'Cannot import MPS file: \'{_INPUT.value}\'')
- return
+ # Load MPS file.
+ if not model.import_from_mps_file(_INPUT.value):
+ print(f"Cannot import MPS file: '{_INPUT.value}'")
+ return
- # Create solver.
- solver = model_builder.ModelSolver(_SOLVER.value)
- if not solver.solver_is_supported():
- print(f'Cannot create solver with name \'{_SOLVER.value}\'')
- return
+ # Create solver.
+ solver = model_builder.ModelSolver(_SOLVER.value)
+ if not solver.solver_is_supported():
+ print(f"Cannot create solver with name '{_SOLVER.value}'")
+ return
- # Set parameters.
- if _PARAMS.value:
- solver.set_solver_specific_parameters(_PARAMS.value)
+ # Set parameters.
+ if _PARAMS.value:
+ solver.set_solver_specific_parameters(_PARAMS.value)
- # Enable the output of the solver.
- solver.enable_output(True)
+ # Enable the output of the solver.
+ solver.enable_output(True)
- # And solve.
- solver.solve(model)
+ # And solve.
+ solver.solve(model)
if __name__ == '__main__':
- app.run(main)
+ app.run(main)
diff --git a/examples/python/pyflow_example.py b/examples/python/pyflow_example.py
index 55db850530f..293ca4d0cf6 100644
--- a/examples/python/pyflow_example.py
+++ b/examples/python/pyflow_example.py
@@ -21,67 +21,72 @@
def max_flow_api():
- """MaxFlow simple interface example."""
- print("MaxFlow on a simple network.")
- tails = [0, 0, 0, 0, 1, 2, 3, 3, 4]
- heads = [1, 2, 3, 4, 3, 4, 4, 5, 5]
- capacities = [5, 8, 5, 3, 4, 5, 6, 6, 4]
- expected_total_flow = 10
- smf = max_flow.SimpleMaxFlow()
- for i in range(0, len(tails)):
- smf.add_arc_with_capacity(tails[i], heads[i], capacities[i])
- if smf.solve(0, 5) == smf.OPTIMAL:
- print("Total flow", smf.optimal_flow(), "/", expected_total_flow)
- for i in range(smf.num_arcs()):
- print(
- "From source %d to target %d: %d / %d"
- % (smf.tail(i), smf.head(i), smf.flow(i), smf.capacity(i))
- )
- print("Source side min-cut:", smf.get_source_side_min_cut())
- print("Sink side min-cut:", smf.get_sink_side_min_cut())
- else:
- print("There was an issue with the max flow input.")
+ """MaxFlow simple interface example."""
+ print("MaxFlow on a simple network.")
+ tails = [0, 0, 0, 0, 1, 2, 3, 3, 4]
+ heads = [1, 2, 3, 4, 3, 4, 4, 5, 5]
+ capacities = [5, 8, 5, 3, 4, 5, 6, 6, 4]
+ expected_total_flow = 10
+ smf = max_flow.SimpleMaxFlow()
+ for i in range(0, len(tails)):
+ smf.add_arc_with_capacity(tails[i], heads[i], capacities[i])
+ if smf.solve(0, 5) == smf.OPTIMAL:
+ print("Total flow", smf.optimal_flow(), "/", expected_total_flow)
+ for i in range(smf.num_arcs()):
+ print(
+ "From source %d to target %d: %d / %d"
+ % (smf.tail(i), smf.head(i), smf.flow(i), smf.capacity(i))
+ )
+ print("Source side min-cut:", smf.get_source_side_min_cut())
+ print("Sink side min-cut:", smf.get_sink_side_min_cut())
+ else:
+ print("There was an issue with the max flow input.")
def min_cost_flow_api():
- """MinCostFlow simple interface example.
+ """MinCostFlow simple interface example.
- Note that this example is actually a linear sum assignment example and will
- be more efficiently solved with the pywrapgraph.LinearSumAssignment class.
- """
- print("MinCostFlow on 4x4 matrix.")
- num_sources = 4
- num_targets = 4
- costs = [[90, 75, 75, 80], [35, 85, 55, 65], [125, 95, 90, 105], [45, 110, 95, 115]]
- expected_cost = 275
- smcf = min_cost_flow.SimpleMinCostFlow()
- for source in range(0, num_sources):
- for target in range(0, num_targets):
- smcf.add_arc_with_capacity_and_unit_cost(
- source, num_sources + target, 1, costs[source][target]
- )
- for node in range(0, num_sources):
- smcf.set_node_supply(node, 1)
- smcf.set_node_supply(num_sources + node, -1)
- status = smcf.solve()
- if status == smcf.OPTIMAL:
- print("Total flow", smcf.optimal_cost(), "/", expected_cost)
- for i in range(0, smcf.num_arcs()):
- if smcf.flow(i) > 0:
- print(
- "From source %d to target %d: cost %d"
- % (smcf.tail(i), smcf.head(i) - num_sources, smcf.unit_cost(i))
- )
- else:
- print("There was an issue with the min cost flow input.")
+ Note that this example is actually a linear sum assignment example and will
+ be more efficiently solved with the pywrapgraph.LinearSumAssignment class.
+ """
+ print("MinCostFlow on 4x4 matrix.")
+ num_sources = 4
+ num_targets = 4
+ costs = [
+ [90, 75, 75, 80],
+ [35, 85, 55, 65],
+ [125, 95, 90, 105],
+ [45, 110, 95, 115],
+ ]
+ expected_cost = 275
+ smcf = min_cost_flow.SimpleMinCostFlow()
+ for source in range(0, num_sources):
+ for target in range(0, num_targets):
+ smcf.add_arc_with_capacity_and_unit_cost(
+ source, num_sources + target, 1, costs[source][target]
+ )
+ for node in range(0, num_sources):
+ smcf.set_node_supply(node, 1)
+ smcf.set_node_supply(num_sources + node, -1)
+ status = smcf.solve()
+ if status == smcf.OPTIMAL:
+ print("Total flow", smcf.optimal_cost(), "/", expected_cost)
+ for i in range(0, smcf.num_arcs()):
+ if smcf.flow(i) > 0:
+ print(
+ "From source %d to target %d: cost %d"
+ % (smcf.tail(i), smcf.head(i) - num_sources, smcf.unit_cost(i))
+ )
+ else:
+ print("There was an issue with the min cost flow input.")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- max_flow_api()
- min_cost_flow_api()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ max_flow_api()
+ min_cost_flow_api()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
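Note (reviewer sketch): a minimal standalone use of the SimpleMinCostFlow interface exercised in pyflow_example.py above, assuming the ortools.graph.python import layout; the 2x2 assignment costs are illustrative only.

from ortools.graph.python import min_cost_flow

smcf = min_cost_flow.SimpleMinCostFlow()
# Sources 0..1 feed targets 2..3 with capacity 1 and illustrative unit costs.
costs = [[90, 75], [35, 85]]
for source in range(2):
    for target in range(2):
        smcf.add_arc_with_capacity_and_unit_cost(
            source, 2 + target, 1, costs[source][target]
        )
for node in range(2):
    smcf.set_node_supply(node, 1)       # Each source supplies one unit.
    smcf.set_node_supply(2 + node, -1)  # Each target consumes one unit.
if smcf.solve() == smcf.OPTIMAL:
    print("Optimal cost:", smcf.optimal_cost())
    for i in range(smcf.num_arcs()):
        if smcf.flow(i) > 0:
            print(
                "source %d -> target %d: cost %d"
                % (smcf.tail(i), smcf.head(i) - 2, smcf.unit_cost(i))
            )
else:
    print("There was an issue with the min cost flow input.")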
diff --git a/examples/python/qubo_sat.py b/examples/python/qubo_sat.py
index fb54b9d5c9f..724014ca7d3 100644
--- a/examples/python/qubo_sat.py
+++ b/examples/python/qubo_sat.py
@@ -653,53 +653,53 @@
def solve_qubo() -> None:
- """solve the Qubo problem."""
+ """solve the Qubo problem."""
- # Build the model.
- model = cp_model.CpModel()
+ # Build the model.
+ model = cp_model.CpModel()
- num_vars = len(RAW_DATA)
- all_vars = range(num_vars)
- variables = [model.new_bool_var("x_%i" % i) for i in all_vars]
+ num_vars = len(RAW_DATA)
+ all_vars = range(num_vars)
+ variables = [model.new_bool_var("x_%i" % i) for i in all_vars]
- obj_vars = []
- obj_coeffs = []
+ obj_vars = []
+ obj_coeffs = []
- for i in range(num_vars - 1):
- x_i = variables[i]
- for j in range(i + 1, num_vars):
- coeff = RAW_DATA[i][j] + RAW_DATA[j][i]
- if coeff == 0.0:
- continue
- x_j = variables[j]
- var = model.new_bool_var("")
- model.add_bool_or([~x_i, ~x_j, var])
- model.add_implication(var, x_i)
- model.add_implication(var, x_j)
- obj_vars.append(var)
- obj_coeffs.append(coeff)
+ for i in range(num_vars - 1):
+ x_i = variables[i]
+ for j in range(i + 1, num_vars):
+ coeff = RAW_DATA[i][j] + RAW_DATA[j][i]
+ if coeff == 0.0:
+ continue
+ x_j = variables[j]
+ var = model.new_bool_var("")
+ model.add_bool_or([~x_i, ~x_j, var])
+ model.add_implication(var, x_i)
+ model.add_implication(var, x_j)
+ obj_vars.append(var)
+ obj_coeffs.append(coeff)
- for i in all_vars:
- self_coeff = RAW_DATA[i][i] + RAW_DATA[i][-1]
- if self_coeff != 0.0:
- obj_vars.append(variables[i])
- obj_coeffs.append(self_coeff)
+ for i in all_vars:
+ self_coeff = RAW_DATA[i][i] + RAW_DATA[i][-1]
+ if self_coeff != 0.0:
+ obj_vars.append(variables[i])
+ obj_coeffs.append(self_coeff)
- model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
+ model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
- ### Solve model.
- solver = cp_model.CpSolver()
- solver.parameters.num_search_workers = 16
- solver.parameters.log_search_progress = True
- solver.parameters.max_time_in_seconds = 30
- solver.solve(model)
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ solver.parameters.num_search_workers = 16
+ solver.parameters.log_search_progress = True
+ solver.parameters.max_time_in_seconds = 30
+ solver.solve(model)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- solve_qubo()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ solve_qubo()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/random_tsp.py b/examples/python/random_tsp.py
index 701152fad08..60177a68fba 100755
--- a/examples/python/random_tsp.py
+++ b/examples/python/random_tsp.py
@@ -12,22 +12,22 @@
# limitations under the License.
"""Traveling Salesman Sample.
- This is a sample using the routing library python wrapper to solve a
- Traveling Salesman Problem.
- The description of the problem can be found here:
- http://en.wikipedia.org/wiki/Travelling_salesman_problem.
- The optimization engine uses local search to improve solutions, first
- solutions being generated using a cheapest addition heuristic.
- Optionally one can randomly forbid a set of random connections between nodes
- (forbidden arcs).
+This is a sample using the routing library python wrapper to solve a
+Traveling Salesman Problem.
+The description of the problem can be found here:
+http://en.wikipedia.org/wiki/Travelling_salesman_problem.
+The optimization engine uses local search to improve solutions, first
+solutions being generated using a cheapest addition heuristic.
+Optionally one can randomly forbid a set of random connections between nodes
+(forbidden arcs).
"""
import argparse
from functools import partial
import random
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
+from ortools.routing import enums_pb2
+from ortools.routing import pywraprouting
parser = argparse.ArgumentParser()
@@ -35,114 +35,117 @@
'--tsp_size',
default=10,
type=int,
- help='Size of Traveling Salesman Problem instance.')
+ help='Size of Traveling Salesman Problem instance.',
+)
parser.add_argument(
'--tsp_use_random_matrix',
default=True,
type=bool,
- help='Use random cost matrix.')
+ help='Use random cost matrix.',
+)
parser.add_argument(
'--tsp_random_forbidden_connections',
default=0,
type=int,
- help='Number of random forbidden connections.')
+ help='Number of random forbidden connections.',
+)
parser.add_argument(
- '--tsp_random_seed', default=0, type=int, help='Random seed.')
+ '--tsp_random_seed', default=0, type=int, help='Random seed.'
+)
# Cost/distance functions.
def Distance(manager, i, j):
- """Sample function."""
- # Put your distance code here.
- node_i = manager.IndexToNode(i)
- node_j = manager.IndexToNode(j)
- return node_i + node_j
+ """Sample function."""
+ # Put your distance code here.
+ node_i = manager.IndexToNode(i)
+ node_j = manager.IndexToNode(j)
+ return node_i + node_j
class RandomMatrix(object):
- """Random matrix."""
-
- def __init__(self, size, seed):
- """Initialize random matrix."""
-
- rand = random.Random()
- rand.seed(seed)
- distance_max = 100
- self.matrix = {}
- for from_node in range(size):
- self.matrix[from_node] = {}
- for to_node in range(size):
- if from_node == to_node:
- self.matrix[from_node][to_node] = 0
- else:
- self.matrix[from_node][to_node] = rand.randrange(
- distance_max)
+ """Random matrix."""
+
+ def __init__(self, size, seed):
+ """Initialize random matrix."""
+
+ rand = random.Random()
+ rand.seed(seed)
+ distance_max = 100
+ self.matrix = {}
+ for from_node in range(size):
+ self.matrix[from_node] = {}
+ for to_node in range(size):
+ if from_node == to_node:
+ self.matrix[from_node][to_node] = 0
+ else:
+ self.matrix[from_node][to_node] = rand.randrange(distance_max)
- def Distance(self, manager, from_index, to_index):
- return self.matrix[manager.IndexToNode(from_index)][manager.IndexToNode(
- to_index)]
+ def Distance(self, manager, from_index, to_index):
+ return self.matrix[manager.IndexToNode(from_index)][
+ manager.IndexToNode(to_index)
+ ]
def main(args):
- # Create routing model
- if args.tsp_size > 0:
- # TSP of size args.tsp_size
- # Second argument = 1 to build a single tour (it's a TSP).
- # Nodes are indexed from 0 to args_tsp_size - 1, by default the start of
- # the route is node 0.
- manager = pywrapcp.RoutingIndexManager(args.tsp_size, 1, 0)
- routing = pywrapcp.RoutingModel(manager)
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- # Setting first solution heuristic (cheapest addition).
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
-
- # Setting the cost function.
- # Put a callback to the distance accessor here. The callback takes two
- # arguments (the from and to node indices) and returns the distance between
- # these indices.
- cost = 0
- if args.tsp_use_random_matrix:
- matrix = RandomMatrix(args.tsp_size, args.tsp_random_seed)
- cost = routing.RegisterTransitCallback(
- partial(matrix.Distance, manager))
- else:
- cost = routing.RegisterTransitCallback(partial(Distance, manager))
- routing.SetArcCostEvaluatorOfAllVehicles(cost)
- # Forbid node connections (randomly).
- rand = random.Random()
- rand.seed(args.tsp_random_seed)
- forbidden_connections = 0
- while forbidden_connections < args.tsp_random_forbidden_connections:
- from_node = rand.randrange(args.tsp_size - 1)
- to_node = rand.randrange(args.tsp_size - 1) + 1
- if routing.NextVar(from_node).Contains(to_node):
- print('Forbidding connection ' + str(from_node) + ' -> ' +
- str(to_node))
- routing.NextVar(from_node).RemoveValue(to_node)
- forbidden_connections += 1
-
- # Solve, returns a solution if any.
- assignment = routing.Solve()
- if assignment:
- # Solution cost.
- print(assignment.ObjectiveValue())
- # Inspect solution.
- # Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
- route_number = 0
- node = routing.Start(route_number)
- route = ''
- while not routing.IsEnd(node):
- route += str(node) + ' -> '
- node = assignment.Value(routing.NextVar(node))
- route += '0'
- print(route)
- else:
- print('No solution found.')
+ # Create routing model
+ if args.tsp_size > 0:
+ # TSP of size args.tsp_size
+ # Second argument = 1 to build a single tour (it's a TSP).
+    # Nodes are indexed from 0 to args.tsp_size - 1; by default the start of
+ # the route is node 0.
+ manager = pywraprouting.RoutingIndexManager(args.tsp_size, 1, 0)
+ routing = pywraprouting.RoutingModel(manager)
+ search_parameters = pywraprouting.DefaultRoutingSearchParameters()
+ # Setting first solution heuristic (cheapest addition).
+ search_parameters.first_solution_strategy = (
+ enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
+ )
+
+ # Setting the cost function.
+ # Put a callback to the distance accessor here. The callback takes two
+ # arguments (the from and to node indices) and returns the distance between
+ # these indices.
+ cost = 0
+ if args.tsp_use_random_matrix:
+ matrix = RandomMatrix(args.tsp_size, args.tsp_random_seed)
+ cost = routing.RegisterTransitCallback(partial(matrix.Distance, manager))
+ else:
+ cost = routing.RegisterTransitCallback(partial(Distance, manager))
+ routing.SetArcCostEvaluatorOfAllVehicles(cost)
+ # Forbid node connections (randomly).
+ rand = random.Random()
+ rand.seed(args.tsp_random_seed)
+ forbidden_connections = 0
+ while forbidden_connections < args.tsp_random_forbidden_connections:
+ from_node = rand.randrange(args.tsp_size - 1)
+ to_node = rand.randrange(args.tsp_size - 1) + 1
+ if routing.NextVar(from_node).Contains(to_node):
+ print('Forbidding connection ' + str(from_node) + ' -> ' + str(to_node))
+ routing.NextVar(from_node).RemoveValue(to_node)
+ forbidden_connections += 1
+
+ # Solve, returns a solution if any.
+ assignment = routing.Solve()
+ if assignment:
+ # Solution cost.
+ print(assignment.ObjectiveValue())
+ # Inspect solution.
+ # Only one route here; otherwise iterate from 0 to routing.vehicles() - 1
+ route_number = 0
+ node = routing.Start(route_number)
+ route = ''
+ while not routing.IsEnd(node):
+ route += str(node) + ' -> '
+ node = assignment.Value(routing.NextVar(node))
+ route += '0'
+ print(route)
else:
- print('Specify an instance greater than 0.')
+ print('No solution found.')
+ else:
+ print('Specify an instance greater than 0.')
if __name__ == '__main__':
- main(parser.parse_args())
+ main(parser.parse_args())
diff --git a/examples/python/rcpsp_sat.py b/examples/python/rcpsp_sat.py
index 2b78e3d049a..6c7199de0dc 100644
--- a/examples/python/rcpsp_sat.py
+++ b/examples/python/rcpsp_sat.py
@@ -45,48 +45,50 @@
def print_problem_statistics(problem: rcpsp_pb2.RcpspProblem):
- """Display various statistics on the problem."""
-
- # Determine problem type.
- problem_type = (
- "Resource Investment Problem" if problem.is_resource_investment else "RCPSP"
- )
-
- num_resources = len(problem.resources)
- num_tasks = len(problem.tasks) - 2 # 2 sentinels.
- tasks_with_alternatives = 0
- variable_duration_tasks = 0
- tasks_with_delay = 0
-
- for task in problem.tasks:
- if len(task.recipes) > 1:
- tasks_with_alternatives += 1
- duration_0 = task.recipes[0].duration
- for recipe in task.recipes:
- if recipe.duration != duration_0:
- variable_duration_tasks += 1
- break
- if task.successor_delays:
- tasks_with_delay += 1
-
- if problem.is_rcpsp_max:
- problem_type += "/Max delay"
- # We print 2 less tasks as these are sentinel tasks that are not counted in
- # the description of the rcpsp models.
- if problem.is_consumer_producer:
- print(f"Solving {problem_type} with:")
- print(f" - {num_resources} reservoir resources")
- print(f" - {num_tasks} tasks")
- else:
- print(f"Solving {problem_type} with:")
- print(f" - {num_resources} renewable resources")
- print(f" - {num_tasks} tasks")
- if tasks_with_alternatives:
- print(f" - {tasks_with_alternatives} tasks with alternative resources")
- if variable_duration_tasks:
- print(f" - {variable_duration_tasks} tasks with variable durations")
- if tasks_with_delay:
- print(f" - {tasks_with_delay} tasks with successor delays")
+ """Display various statistics on the problem."""
+
+ # Determine problem type.
+ problem_type = (
+ "Resource Investment Problem"
+ if problem.is_resource_investment
+ else "RCPSP"
+ )
+
+ num_resources = len(problem.resources)
+ num_tasks = len(problem.tasks) - 2 # 2 sentinels.
+ tasks_with_alternatives = 0
+ variable_duration_tasks = 0
+ tasks_with_delay = 0
+
+ for task in problem.tasks:
+ if len(task.recipes) > 1:
+ tasks_with_alternatives += 1
+ duration_0 = task.recipes[0].duration
+ for recipe in task.recipes:
+ if recipe.duration != duration_0:
+ variable_duration_tasks += 1
+ break
+ if task.successor_delays:
+ tasks_with_delay += 1
+
+ if problem.is_rcpsp_max:
+ problem_type += "/Max delay"
+  # We print 2 fewer tasks as these are sentinel tasks that are not counted in
+ # the description of the rcpsp models.
+ if problem.is_consumer_producer:
+ print(f"Solving {problem_type} with:")
+ print(f" - {num_resources} reservoir resources")
+ print(f" - {num_tasks} tasks")
+ else:
+ print(f"Solving {problem_type} with:")
+ print(f" - {num_resources} renewable resources")
+ print(f" - {num_tasks} tasks")
+ if tasks_with_alternatives:
+ print(f" - {tasks_with_alternatives} tasks with alternative resources")
+ if variable_duration_tasks:
+ print(f" - {variable_duration_tasks} tasks with variable durations")
+ if tasks_with_delay:
+ print(f" - {tasks_with_delay} tasks with successor delays")
def solve_rcpsp(
@@ -97,305 +99,307 @@ def solve_rcpsp(
source: int,
sink: int,
) -> None:
- """Parse and solve a given RCPSP problem in proto format.
-
- The model will only look at the tasks {source} + {sink} + active_tasks, and
- ignore all others.
-
- Args:
- problem: the description of the model to solve in protobuf format
- proto_file: the name of the file to export the CpModel proto to.
- params: the string representation of the parameters to pass to the sat
- solver.
- active_tasks: the set of active tasks to consider.
- source: the source task in the graph. Its end will be forced to 0.
- sink: the sink task of the graph. Its start is the makespan of the problem.
-
- Returns:
- (lower_bound of the objective, best solution found, assignment)
- """
- # Create the model.
- model = cp_model.CpModel()
- model.name = problem.name
-
- num_resources = len(problem.resources)
-
- all_active_tasks = list(active_tasks)
- all_active_tasks.sort()
- all_resources = range(num_resources)
-
- horizon = problem.deadline if problem.deadline != -1 else problem.horizon
- if _HORIZON.value > 0:
- horizon = _HORIZON.value
- elif horizon == -1: # Naive computation.
- horizon = sum(max(r.duration for r in t.recipes) for t in problem.tasks)
- if problem.is_rcpsp_max:
- for t in problem.tasks:
- for sd in t.successor_delays:
- for rd in sd.recipe_delays:
- for d in rd.min_delays:
- horizon += abs(d)
- print(f"Horizon = {horizon}", flush=True)
-
- # Containers.
- task_starts = {}
- task_ends = {}
- task_durations = {}
- task_intervals = {}
- task_resource_to_energy = {}
- task_to_resource_demands = collections.defaultdict(list)
-
- task_to_presence_literals = collections.defaultdict(list)
- task_to_recipe_durations = collections.defaultdict(list)
- task_resource_to_fixed_demands = collections.defaultdict(dict)
- task_resource_to_max_energy = collections.defaultdict(int)
-
- resource_to_sum_of_demand_max = collections.defaultdict(int)
-
- # Create task variables.
- for t in all_active_tasks:
- task = problem.tasks[t]
- num_recipes = len(task.recipes)
- all_recipes = range(num_recipes)
-
- start_var = model.new_int_var(0, horizon, f"start_of_task_{t}")
- end_var = model.new_int_var(0, horizon, f"end_of_task_{t}")
+ """Parse and solve a given RCPSP problem in proto format.
+
+ The model will only look at the tasks {source} + {sink} + active_tasks, and
+ ignore all others.
+
+ Args:
+ problem: the description of the model to solve in protobuf format
+ proto_file: the name of the file to export the CpModel proto to.
+ params: the string representation of the parameters to pass to the sat
+ solver.
+ active_tasks: the set of active tasks to consider.
+ source: the source task in the graph. Its end will be forced to 0.
+ sink: the sink task of the graph. Its start is the makespan of the problem.
+
+ Returns:
+ (lower_bound of the objective, best solution found, assignment)
+ """
+ # Create the model.
+ model = cp_model.CpModel()
+ model.name = problem.name
+
+ num_resources = len(problem.resources)
+
+ all_active_tasks = list(active_tasks)
+ all_active_tasks.sort()
+ all_resources = range(num_resources)
+
+ horizon = problem.deadline if problem.deadline != -1 else problem.horizon
+ if _HORIZON.value > 0:
+ horizon = _HORIZON.value
+ elif horizon == -1: # Naive computation.
+ horizon = sum(max(r.duration for r in t.recipes) for t in problem.tasks)
+ if problem.is_rcpsp_max:
+ for t in problem.tasks:
+ for sd in t.successor_delays:
+ for rd in sd.recipe_delays:
+ for d in rd.min_delays:
+ horizon += abs(d)
+ print(f"Horizon = {horizon}", flush=True)
+
+ # Containers.
+ task_starts = {}
+ task_ends = {}
+ task_durations = {}
+ task_intervals = {}
+ task_resource_to_energy = {}
+ task_to_resource_demands = collections.defaultdict(list)
+
+ task_to_presence_literals = collections.defaultdict(list)
+ task_to_recipe_durations = collections.defaultdict(list)
+ task_resource_to_fixed_demands = collections.defaultdict(dict)
+ task_resource_to_max_energy = collections.defaultdict(int)
+
+ resource_to_sum_of_demand_max = collections.defaultdict(int)
+
+ # Create task variables.
+ for t in all_active_tasks:
+ task = problem.tasks[t]
+ num_recipes = len(task.recipes)
+ all_recipes = range(num_recipes)
+
+ start_var = model.new_int_var(0, horizon, f"start_of_task_{t}")
+ end_var = model.new_int_var(0, horizon, f"end_of_task_{t}")
+
+ if num_recipes > 1:
+ # Create one literal per recipe.
+ literals = [
+ model.new_bool_var(f"is_present_{t}_{r}") for r in all_recipes
+ ]
+
+ # Exactly one recipe must be performed.
+ model.add_exactly_one(literals)
- if num_recipes > 1:
- # Create one literal per recipe.
- literals = [model.new_bool_var(f"is_present_{t}_{r}") for r in all_recipes]
+ else:
+ literals = [1]
- # Exactly one recipe must be performed.
- model.add_exactly_one(literals)
+ # Temporary data structure to fill in 0 demands.
+ demand_matrix = collections.defaultdict(int)
- else:
- literals = [1]
+ # Scan recipes and build the demand matrix and the vector of durations.
+ for recipe_index, recipe in enumerate(task.recipes):
+ task_to_recipe_durations[t].append(recipe.duration)
+ for demand, resource in zip(recipe.demands, recipe.resources):
+ demand_matrix[(resource, recipe_index)] = demand
- # Temporary data structure to fill in 0 demands.
- demand_matrix = collections.defaultdict(int)
+ # Create the duration variable from the accumulated durations.
+ duration_var = model.new_int_var_from_domain(
+ cp_model.Domain.from_values(task_to_recipe_durations[t]),
+ f"duration_of_task_{t}",
+ )
- # Scan recipes and build the demand matrix and the vector of durations.
- for recipe_index, recipe in enumerate(task.recipes):
- task_to_recipe_durations[t].append(recipe.duration)
- for demand, resource in zip(recipe.demands, recipe.resources):
- demand_matrix[(resource, recipe_index)] = demand
+ # Link the recipe literals and the duration_var.
+ for r in range(num_recipes):
+ model.add(duration_var == task_to_recipe_durations[t][r]).only_enforce_if(
+ literals[r]
+ )
- # Create the duration variable from the accumulated durations.
- duration_var = model.new_int_var_from_domain(
- cp_model.Domain.from_values(task_to_recipe_durations[t]),
- f"duration_of_task_{t}",
- )
+ # Create the interval of the task.
+ task_interval = model.new_interval_var(
+ start_var, duration_var, end_var, f"task_interval_{t}"
+ )
- # Link the recipe literals and the duration_var.
- for r in range(num_recipes):
- model.add(duration_var == task_to_recipe_durations[t][r]).only_enforce_if(
- literals[r]
- )
+ # Store task variables.
+ task_starts[t] = start_var
+ task_ends[t] = end_var
+ task_durations[t] = duration_var
+ task_intervals[t] = task_interval
+ task_to_presence_literals[t] = literals
- # Create the interval of the task.
- task_interval = model.new_interval_var(
- start_var, duration_var, end_var, f"task_interval_{t}"
+ # Create the demand variable of the task for each resource.
+ for res in all_resources:
+ demands = [demand_matrix[(res, recipe)] for recipe in all_recipes]
+ task_resource_to_fixed_demands[(t, res)] = demands
+ demand_var = model.new_int_var_from_domain(
+ cp_model.Domain.from_values(demands), f"demand_{t}_{res}"
+ )
+ task_to_resource_demands[t].append(demand_var)
+
+ # Link the recipe literals and the demand_var.
+ for r in all_recipes:
+ model.add(demand_var == demand_matrix[(res, r)]).only_enforce_if(
+ literals[r]
)
- # Store task variables.
- task_starts[t] = start_var
- task_ends[t] = end_var
- task_durations[t] = duration_var
- task_intervals[t] = task_interval
- task_to_presence_literals[t] = literals
-
- # Create the demand variable of the task for each resource.
- for res in all_resources:
- demands = [demand_matrix[(res, recipe)] for recipe in all_recipes]
- task_resource_to_fixed_demands[(t, res)] = demands
- demand_var = model.new_int_var_from_domain(
- cp_model.Domain.from_values(demands), f"demand_{t}_{res}"
- )
- task_to_resource_demands[t].append(demand_var)
-
- # Link the recipe literals and the demand_var.
- for r in all_recipes:
- model.add(demand_var == demand_matrix[(res, r)]).only_enforce_if(
- literals[r]
- )
-
- resource_to_sum_of_demand_max[res] += max(demands)
-
- # Create the energy expression for (task, resource):
- for res in all_resources:
- task_resource_to_energy[(t, res)] = sum(
- literals[r]
- * task_to_recipe_durations[t][r]
- * task_resource_to_fixed_demands[(t, res)][r]
- for r in all_recipes
- )
- task_resource_to_max_energy[(t, res)] = max(
- task_to_recipe_durations[t][r]
- * task_resource_to_fixed_demands[(t, res)][r]
- for r in all_recipes
- )
-
- # Create makespan variable
- makespan = model.new_int_var(0, horizon, "makespan")
- makespan_size = model.new_int_var(1, horizon, "interval_makespan_size")
- interval_makespan = model.new_interval_var(
- makespan,
- makespan_size,
- model.new_constant(horizon + 1),
- "interval_makespan",
- )
+ resource_to_sum_of_demand_max[res] += max(demands)
- # Add precedences.
- if problem.is_rcpsp_max:
- # In RCPSP/Max problem, precedences are given and max delay (possible
- # negative) between the starts of two tasks.
- for task_id in all_active_tasks:
- task = problem.tasks[task_id]
- num_modes = len(task.recipes)
-
- for successor_index, next_id in enumerate(task.successors):
- delay_matrix = task.successor_delays[successor_index]
- num_next_modes = len(problem.tasks[next_id].recipes)
- for m1 in range(num_modes):
- s1 = task_starts[task_id]
- p1 = task_to_presence_literals[task_id][m1]
- if next_id == sink:
- delay = delay_matrix.recipe_delays[m1].min_delays[0]
- model.add(s1 + delay <= makespan).only_enforce_if(p1)
- else:
- for m2 in range(num_next_modes):
- delay = delay_matrix.recipe_delays[m1].min_delays[m2]
- s2 = task_starts[next_id]
- p2 = task_to_presence_literals[next_id][m2]
- model.add(s1 + delay <= s2).only_enforce_if([p1, p2])
- else:
- # Normal dependencies (task ends before the start of successors).
- for t in all_active_tasks:
- for n in problem.tasks[t].successors:
- if n == sink:
- model.add(task_ends[t] <= makespan)
- elif n in active_tasks:
- model.add(task_ends[t] <= task_starts[n])
-
- # Containers for resource investment problems.
- capacities = [] # Capacity variables for all resources.
- max_cost = 0 # Upper bound on the investment cost.
-
- # Create resources.
+ # Create the energy expression for (task, resource):
for res in all_resources:
- resource = problem.resources[res]
- c = resource.max_capacity
- if c == -1:
- print(f"No capacity: {resource}")
- c = resource_to_sum_of_demand_max[res]
-
- # RIP problems have only renewable resources, and no makespan.
- if problem.is_resource_investment or resource.renewable:
- intervals = [task_intervals[t] for t in all_active_tasks]
- demands = [task_to_resource_demands[t][res] for t in all_active_tasks]
-
- if problem.is_resource_investment:
- capacity = model.new_int_var(0, c, f"capacity_of_{res}")
- model.add_cumulative(intervals, demands, capacity)
- capacities.append(capacity)
- max_cost += c * resource.unit_cost
- else: # Standard renewable resource.
- if _USE_INTERVAL_MAKESPAN.value:
- intervals.append(interval_makespan)
- demands.append(c)
-
- model.add_cumulative(intervals, demands, c)
- else: # Non empty non renewable resource. (single mode only)
- if problem.is_consumer_producer:
- reservoir_starts = []
- reservoir_demands = []
- for t in all_active_tasks:
- if task_resource_to_fixed_demands[(t, res)][0]:
- reservoir_starts.append(task_starts[t])
- reservoir_demands.append(
- task_resource_to_fixed_demands[(t, res)][0]
- )
- model.add_reservoir_constraint(
- reservoir_starts,
- reservoir_demands,
- resource.min_capacity,
- resource.max_capacity,
- )
- else: # No producer-consumer. We just sum the demands.
- model.add(
- cp_model.LinearExpr.sum(
- [task_to_resource_demands[t][res] for t in all_active_tasks]
- )
- <= c
- )
-
- # Objective.
- if problem.is_resource_investment:
- objective = model.new_int_var(0, max_cost, "capacity_costs")
+ task_resource_to_energy[(t, res)] = sum(
+ literals[r]
+ * task_to_recipe_durations[t][r]
+ * task_resource_to_fixed_demands[(t, res)][r]
+ for r in all_recipes
+ )
+ task_resource_to_max_energy[(t, res)] = max(
+ task_to_recipe_durations[t][r]
+ * task_resource_to_fixed_demands[(t, res)][r]
+ for r in all_recipes
+ )
+
+ # Create makespan variable
+ makespan = model.new_int_var(0, horizon, "makespan")
+ makespan_size = model.new_int_var(1, horizon, "interval_makespan_size")
+ interval_makespan = model.new_interval_var(
+ makespan,
+ makespan_size,
+ model.new_constant(horizon + 1),
+ "interval_makespan",
+ )
+
+ # Add precedences.
+ if problem.is_rcpsp_max:
+    # In the RCPSP/Max problem, precedences are given as a max delay (possibly
+    # negative) between the starts of two tasks.
+ for task_id in all_active_tasks:
+ task = problem.tasks[task_id]
+ num_modes = len(task.recipes)
+
+ for successor_index, next_id in enumerate(task.successors):
+ delay_matrix = task.successor_delays[successor_index]
+ num_next_modes = len(problem.tasks[next_id].recipes)
+ for m1 in range(num_modes):
+ s1 = task_starts[task_id]
+ p1 = task_to_presence_literals[task_id][m1]
+ if next_id == sink:
+ delay = delay_matrix.recipe_delays[m1].min_delays[0]
+ model.add(s1 + delay <= makespan).only_enforce_if(p1)
+ else:
+ for m2 in range(num_next_modes):
+ delay = delay_matrix.recipe_delays[m1].min_delays[m2]
+ s2 = task_starts[next_id]
+ p2 = task_to_presence_literals[next_id][m2]
+ model.add(s1 + delay <= s2).only_enforce_if([p1, p2])
+ else:
+ # Normal dependencies (task ends before the start of successors).
+ for t in all_active_tasks:
+ for n in problem.tasks[t].successors:
+ if n == sink:
+ model.add(task_ends[t] <= makespan)
+ elif n in active_tasks:
+ model.add(task_ends[t] <= task_starts[n])
+
+ # Containers for resource investment problems.
+ capacities = [] # Capacity variables for all resources.
+ max_cost = 0 # Upper bound on the investment cost.
+
+ # Create resources.
+ for res in all_resources:
+ resource = problem.resources[res]
+ c = resource.max_capacity
+ if c == -1:
+ print(f"No capacity: {resource}")
+ c = resource_to_sum_of_demand_max[res]
+
+ # RIP problems have only renewable resources, and no makespan.
+ if problem.is_resource_investment or resource.renewable:
+ intervals = [task_intervals[t] for t in all_active_tasks]
+ demands = [task_to_resource_demands[t][res] for t in all_active_tasks]
+
+ if problem.is_resource_investment:
+ capacity = model.new_int_var(0, c, f"capacity_of_{res}")
+ model.add_cumulative(intervals, demands, capacity)
+ capacities.append(capacity)
+ max_cost += c * resource.unit_cost
+ else: # Standard renewable resource.
+ if _USE_INTERVAL_MAKESPAN.value:
+ intervals.append(interval_makespan)
+ demands.append(c)
+
+ model.add_cumulative(intervals, demands, c)
+    else:  # Non-empty, non-renewable resource (single mode only).
+ if problem.is_consumer_producer:
+ reservoir_starts = []
+ reservoir_demands = []
+ for t in all_active_tasks:
+ if task_resource_to_fixed_demands[(t, res)][0]:
+ reservoir_starts.append(task_starts[t])
+ reservoir_demands.append(
+ task_resource_to_fixed_demands[(t, res)][0]
+ )
+ model.add_reservoir_constraint(
+ reservoir_starts,
+ reservoir_demands,
+ resource.min_capacity,
+ resource.max_capacity,
+ )
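+        # The reservoir level (the running sum of demands of tasks that have
+        # started) must stay within [min_capacity, max_capacity] at all times.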
+ else: # No producer-consumer. We just sum the demands.
model.add(
- objective
- == sum(
- problem.resources[i].unit_cost * capacities[i]
- for i in range(len(capacities))
+ cp_model.LinearExpr.sum(
+ [task_to_resource_demands[t][res] for t in all_active_tasks]
)
+ <= c
)
- else:
- objective = makespan
- model.minimize(objective)
+ # Objective.
+ if problem.is_resource_investment:
+ objective = model.new_int_var(0, max_cost, "capacity_costs")
+ model.add(
+ objective
+ == sum(
+ problem.resources[i].unit_cost * capacities[i]
+ for i in range(len(capacities))
+ )
+ )
+ else:
+ objective = makespan
- # Add sentinels.
- task_starts[source] = 0
- task_ends[source] = 0
- task_to_presence_literals[0].append(True)
- task_starts[sink] = makespan
- task_to_presence_literals[sink].append(True)
+ model.minimize(objective)
- # Write model to file.
- if proto_file:
- print(f"Writing proto to{proto_file}")
- model.export_to_file(proto_file)
+ # Add sentinels.
+ task_starts[source] = 0
+ task_ends[source] = 0
+ task_to_presence_literals[0].append(True)
+ task_starts[sink] = makespan
+ task_to_presence_literals[sink].append(True)
- # Solve model.
- solver = cp_model.CpSolver()
+ # Write model to file.
+ if proto_file:
+    print(f"Writing proto to {proto_file}")
+ model.export_to_file(proto_file)
- # Parse user specified parameters.
- if params:
- text_format.Parse(params, solver.parameters)
+ # Solve model.
+ solver = cp_model.CpSolver()
- # Favor objective_shaving over objective_lb_search.
- if solver.parameters.num_workers >= 16 and solver.parameters.num_workers < 24:
- solver.parameters.ignore_subsolvers.append("objective_lb_search")
- solver.parameters.extra_subsolvers.append("objective_shaving")
+ # Parse user specified parameters.
+ if params:
+ text_format.Parse(params, solver.parameters)
- # Experimental: Specify the fact that the objective is a makespan
- solver.parameters.push_all_tasks_toward_start = True
+ # Favor objective_shaving over objective_lb_search.
+ if solver.parameters.num_workers >= 16 and solver.parameters.num_workers < 24:
+ solver.parameters.ignore_subsolvers.append("objective_lb_search")
+ solver.parameters.extra_subsolvers.append("objective_shaving")
- # Enable logging in the main solve.
- solver.parameters.log_search_progress = True
+  # Experimental: specify that the objective is a makespan.
+ solver.parameters.push_all_tasks_toward_start = True
- # Solve the model.
- solver.solve(model)
+ # Enable logging in the main solve.
+ solver.parameters.log_search_progress = True
+
+ # Solve the model.
+ solver.solve(model)
def main(_):
- rcpsp_parser = rcpsp.RcpspParser()
- rcpsp_parser.parse_file(_INPUT.value)
+ rcpsp_parser = rcpsp.RcpspParser()
+ rcpsp_parser.parse_file(_INPUT.value)
- problem = rcpsp_parser.problem()
- print_problem_statistics(problem)
+ problem = rcpsp_parser.problem()
+ print_problem_statistics(problem)
- last_task = len(problem.tasks) - 1
+ last_task = len(problem.tasks) - 1
- solve_rcpsp(
- problem=problem,
- proto_file=_OUTPUT_PROTO.value,
- params=_PARAMS.value,
- active_tasks=set(range(1, last_task)),
- source=0,
- sink=last_task,
- )
+ solve_rcpsp(
+ problem=problem,
+ proto_file=_OUTPUT_PROTO.value,
+ params=_PARAMS.value,
+ active_tasks=set(range(1, last_task)),
+ source=0,
+ sink=last_task,
+ )
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/reallocate_sat.py b/examples/python/reallocate_sat.py
index d159a5a96c9..3146f8c0660 100644
--- a/examples/python/reallocate_sat.py
+++ b/examples/python/reallocate_sat.py
@@ -19,115 +19,115 @@
def main():
- # Data
- data_0 = [
- [107, 107, 107, 0, 0], # pr1
- [0, 47, 47, 47, 0], # pr2
- [10, 10, 10, 0, 0], # pr3
- [0, 55, 55, 55, 55], # pr4
- ]
-
- data_1 = [
- [119444030, 0, 0, 0],
- [34585586, 38358559, 31860661, 0],
- [19654655, 21798799, 18106106, 0],
- [298836792, 0, 0, 0],
- [3713428, 4118530, 4107277, 3072018],
- [6477273, 7183884, 5358471, 0],
- [1485371, 1647412, 1642911, 1228807],
- ]
-
- data_2 = [
- [1194440, 0, 0, 0],
- [345855, 383585, 318606, 0],
- [196546, 217987, 181061, 0],
- [2988367, 0, 0, 0],
- [37134, 41185, 41072, 30720],
- [64772, 71838, 53584, 0],
- [14853, 16474, 16429, 12288],
- ]
-
- pr = data_0
-
- num_pr = len(pr)
- num_years = len(pr[1])
- total = sum(pr[p][y] for p in range(num_pr) for y in range(num_years))
- avg = total // num_years
-
- # Model
- model = cp_model.CpModel()
-
- # Variables
- delta = model.NewIntVar(0, total, "delta")
-
- contributions_per_years = collections.defaultdict(list)
- contributions_per_prs = collections.defaultdict(list)
- all_contribs = {}
-
- for p, inner_l in enumerate(pr):
- for y, item in enumerate(inner_l):
- if item != 0:
- contrib = model.NewIntVar(0, total, "r%d c%d" % (p, y))
- contributions_per_years[y].append(contrib)
- contributions_per_prs[p].append(contrib)
- all_contribs[p, y] = contrib
-
- year_var = [model.NewIntVar(0, total, "y[%i]" % i) for i in range(num_years)]
-
- # Constraints
-
- # Maintain year_var.
- for y in range(num_years):
- model.Add(year_var[y] == sum(contributions_per_years[y]))
-
- # Fixed contributions per pr.
+ # Data
+ data_0 = [
+ [107, 107, 107, 0, 0], # pr1
+ [0, 47, 47, 47, 0], # pr2
+ [10, 10, 10, 0, 0], # pr3
+ [0, 55, 55, 55, 55], # pr4
+ ]
+
+ data_1 = [
+ [119444030, 0, 0, 0],
+ [34585586, 38358559, 31860661, 0],
+ [19654655, 21798799, 18106106, 0],
+ [298836792, 0, 0, 0],
+ [3713428, 4118530, 4107277, 3072018],
+ [6477273, 7183884, 5358471, 0],
+ [1485371, 1647412, 1642911, 1228807],
+ ]
+
+ data_2 = [
+ [1194440, 0, 0, 0],
+ [345855, 383585, 318606, 0],
+ [196546, 217987, 181061, 0],
+ [2988367, 0, 0, 0],
+ [37134, 41185, 41072, 30720],
+ [64772, 71838, 53584, 0],
+ [14853, 16474, 16429, 12288],
+ ]
+
+ pr = data_0
+
+ num_pr = len(pr)
+ num_years = len(pr[1])
+ total = sum(pr[p][y] for p in range(num_pr) for y in range(num_years))
+ avg = total // num_years
+
+ # Model
+ model = cp_model.CpModel()
+
+ # Variables
+ delta = model.NewIntVar(0, total, "delta")
+
+ contributions_per_years = collections.defaultdict(list)
+ contributions_per_prs = collections.defaultdict(list)
+ all_contribs = {}
+
+ for p, inner_l in enumerate(pr):
+ for y, item in enumerate(inner_l):
+ if item != 0:
+ contrib = model.NewIntVar(0, total, "r%d c%d" % (p, y))
+ contributions_per_years[y].append(contrib)
+ contributions_per_prs[p].append(contrib)
+ all_contribs[p, y] = contrib
+
+ year_var = [model.NewIntVar(0, total, "y[%i]" % i) for i in range(num_years)]
+
+ # Constraints
+
+ # Maintain year_var.
+ for y in range(num_years):
+ model.Add(year_var[y] == sum(contributions_per_years[y]))
+
+ # Fixed contributions per pr.
+ for p in range(num_pr):
+ model.Add(sum(pr[p]) == sum(contributions_per_prs[p]))
+
+ # Link delta with variables.
+ for y in range(num_years):
+ model.Add(year_var[y] >= avg - delta)
+
+ for y in range(num_years):
+ model.Add(year_var[y] <= avg + delta)
+
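+  # delta is thus an upper bound on the deviation of every yearly total from
+  # the average; minimizing it spreads the fixed per-project totals as evenly
+  # across years as the allowed (nonzero) cells permit.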
+ # Solve and output
+ model.Minimize(delta)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ status = solver.Solve(model)
+
+ # Output solution.
+ if status == cp_model.OPTIMAL:
+ print("Data")
+ print(" - total = ", total)
+ print(" - year_average = ", avg)
+ print(" - number of projects = ", num_pr)
+ print(" - number of years = ", num_years)
+
+ print(" - input production")
for p in range(num_pr):
- model.Add(sum(pr[p]) == sum(contributions_per_prs[p]))
-
- # Link delta with variables.
- for y in range(num_years):
- model.Add(year_var[y] >= avg - delta)
+ for y in range(num_years):
+ if pr[p][y] == 0:
+ print(" ", end="")
+ else:
+ print("%10i" % pr[p][y], end="")
+ print()
+
+ print("Solution")
+ for p in range(num_pr):
+ for y in range(num_years):
+ if pr[p][y] == 0:
+ print(" ", end="")
+ else:
+ print("%10i" % solver.Value(all_contribs[p, y]), end="")
+ print()
for y in range(num_years):
- model.Add(year_var[y] <= avg + delta)
-
- # Solve and output
- model.Minimize(delta)
-
- # Solve model.
- solver = cp_model.CpSolver()
- status = solver.Solve(model)
-
- # Output solution.
- if status == cp_model.OPTIMAL:
- print("Data")
- print(" - total = ", total)
- print(" - year_average = ", avg)
- print(" - number of projects = ", num_pr)
- print(" - number of years = ", num_years)
-
- print(" - input production")
- for p in range(num_pr):
- for y in range(num_years):
- if pr[p][y] == 0:
- print(" ", end="")
- else:
- print("%10i" % pr[p][y], end="")
- print()
-
- print("Solution")
- for p in range(num_pr):
- for y in range(num_years):
- if pr[p][y] == 0:
- print(" ", end="")
- else:
- print("%10i" % solver.Value(all_contribs[p, y]), end="")
- print()
-
- for y in range(num_years):
- print("%10i" % solver.Value(year_var[y]), end="")
- print()
+ print("%10i" % solver.Value(year_var[y]), end="")
+ print()
if __name__ == "__main__":
- main()
+ main()
diff --git a/examples/python/shift_scheduling_sat.py b/examples/python/shift_scheduling_sat.py
index a81c0839910..eb51466ed04 100644
--- a/examples/python/shift_scheduling_sat.py
+++ b/examples/python/shift_scheduling_sat.py
@@ -31,32 +31,32 @@
def negated_bounded_span(
works: list[cp_model.BoolVarT], start: int, length: int
) -> list[cp_model.BoolVarT]:
- """Filters an isolated sub-sequence of variables assigned to True.
-
- Extract the span of Boolean variables [start, start + length), negate them,
- and if there is variables to the left/right of this span, surround the span by
- them in non negated form.
-
- Args:
- works: a list of variables to extract the span from.
- start: the start to the span.
- length: the length of the span.
-
- Returns:
- a list of variables which conjunction will be false if the sub-list is
- assigned to True, and correctly bounded by variables assigned to False,
- or by the start or end of works.
- """
- sequence = []
- # left border (start of works, or works[start - 1])
- if start > 0:
- sequence.append(works[start - 1])
- for i in range(length):
- sequence.append(~works[start + i])
- # right border (end of works or works[start + length])
- if start + length < len(works):
- sequence.append(works[start + length])
- return sequence
+ """Filters an isolated sub-sequence of variables assigned to True.
+
+  Extract the span of Boolean variables [start, start + length), negate them,
+  and if there are variables to the left/right of this span, surround the span
+  with them in non-negated form.
+
+ Args:
+ works: a list of variables to extract the span from.
+ start: the start to the span.
+ length: the length of the span.
+
+ Returns:
+    a list of variables whose conjunction will be false if the sub-list is
+    assigned to True and is correctly bounded by variables assigned to False,
+    or by the start or end of works.
+ """
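+  # For example, with works = [w0, w1, w2, w3, w4], start = 1 and length = 2,
+  # the result is [w0, ~w1, ~w2, w3]; adding a bool_or over it forbids the
+  # isolated true block w1..w2.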
+ sequence = []
+ # left border (start of works, or works[start - 1])
+ if start > 0:
+ sequence.append(works[start - 1])
+ for i in range(length):
+ sequence.append(~works[start + i])
+ # right border (end of works or works[start + length])
+ if start + length < len(works):
+ sequence.append(works[start + length])
+ return sequence
def add_soft_sequence_constraint(
@@ -70,72 +70,72 @@ def add_soft_sequence_constraint(
max_cost: int,
prefix: str,
) -> tuple[list[cp_model.BoolVarT], list[int]]:
- """Sequence constraint on true variables with soft and hard bounds.
-
- This constraint look at every maximal contiguous sequence of variables
- assigned to true. If forbids sequence of length < hard_min or > hard_max.
- Then it creates penalty terms if the length is < soft_min or > soft_max.
-
- Args:
- model: the sequence constraint is built on this model.
- works: a list of Boolean variables.
- hard_min: any sequence of true variables must have a length of at least
- hard_min.
- soft_min: any sequence should have a length of at least soft_min, or a
- linear penalty on the delta will be added to the objective.
- min_cost: the coefficient of the linear penalty if the length is less than
- soft_min.
- soft_max: any sequence should have a length of at most soft_max, or a linear
- penalty on the delta will be added to the objective.
- hard_max: any sequence of true variables must have a length of at most
- hard_max.
- max_cost: the coefficient of the linear penalty if the length is more than
- soft_max.
- prefix: a base name for penalty literals.
-
- Returns:
- a tuple (variables_list, coefficient_list) containing the different
- penalties created by the sequence constraint.
- """
- cost_literals = []
- cost_coefficients = []
-
- # Forbid sequences that are too short.
- for length in range(1, hard_min):
- for start in range(len(works) - length + 1):
- model.add_bool_or(negated_bounded_span(works, start, length))
-
- # Penalize sequences that are below the soft limit.
- if min_cost > 0:
- for length in range(hard_min, soft_min):
- for start in range(len(works) - length + 1):
- span = negated_bounded_span(works, start, length)
- name = f": under_span(start={start}, length={length})"
- lit = model.new_bool_var(prefix + name)
- span.append(lit)
- model.add_bool_or(span)
- cost_literals.append(lit)
- # We filter exactly the sequence with a short length.
- # The penalty is proportional to the delta with soft_min.
- cost_coefficients.append(min_cost * (soft_min - length))
-
- # Penalize sequences that are above the soft limit.
- if max_cost > 0:
- for length in range(soft_max + 1, hard_max + 1):
- for start in range(len(works) - length + 1):
- span = negated_bounded_span(works, start, length)
- name = f": over_span(start={start}, length={length})"
- lit = model.new_bool_var(prefix + name)
- span.append(lit)
- model.add_bool_or(span)
- cost_literals.append(lit)
- # Cost paid is max_cost * excess length.
- cost_coefficients.append(max_cost * (length - soft_max))
-
- # Just forbid any sequence of true variables with length hard_max + 1
- for start in range(len(works) - hard_max):
- model.add_bool_or([~works[i] for i in range(start, start + hard_max + 1)])
- return cost_literals, cost_coefficients
+ """Sequence constraint on true variables with soft and hard bounds.
+
+  This constraint looks at every maximal contiguous sequence of variables
+  assigned to true. It forbids sequences of length < hard_min or > hard_max.
+ Then it creates penalty terms if the length is < soft_min or > soft_max.
+
+ Args:
+ model: the sequence constraint is built on this model.
+ works: a list of Boolean variables.
+ hard_min: any sequence of true variables must have a length of at least
+ hard_min.
+ soft_min: any sequence should have a length of at least soft_min, or a
+ linear penalty on the delta will be added to the objective.
+ min_cost: the coefficient of the linear penalty if the length is less than
+ soft_min.
+ soft_max: any sequence should have a length of at most soft_max, or a linear
+ penalty on the delta will be added to the objective.
+ hard_max: any sequence of true variables must have a length of at most
+ hard_max.
+ max_cost: the coefficient of the linear penalty if the length is more than
+ soft_max.
+ prefix: a base name for penalty literals.
+
+ Returns:
+ a tuple (variables_list, coefficient_list) containing the different
+ penalties created by the sequence constraint.
+ """
+ cost_literals = []
+ cost_coefficients = []
+
+ # Forbid sequences that are too short.
+ for length in range(1, hard_min):
+ for start in range(len(works) - length + 1):
+ model.add_bool_or(negated_bounded_span(works, start, length))
+
+ # Penalize sequences that are below the soft limit.
+ if min_cost > 0:
+ for length in range(hard_min, soft_min):
+ for start in range(len(works) - length + 1):
+ span = negated_bounded_span(works, start, length)
+ name = f": under_span(start={start}, length={length})"
+ lit = model.new_bool_var(prefix + name)
+ span.append(lit)
+ model.add_bool_or(span)
+ cost_literals.append(lit)
+ # We filter exactly the sequence with a short length.
+ # The penalty is proportional to the delta with soft_min.
+ cost_coefficients.append(min_cost * (soft_min - length))
+
+ # Penalize sequences that are above the soft limit.
+ if max_cost > 0:
+ for length in range(soft_max + 1, hard_max + 1):
+ for start in range(len(works) - length + 1):
+ span = negated_bounded_span(works, start, length)
+ name = f": over_span(start={start}, length={length})"
+ lit = model.new_bool_var(prefix + name)
+ span.append(lit)
+ model.add_bool_or(span)
+ cost_literals.append(lit)
+ # Cost paid is max_cost * excess length.
+ cost_coefficients.append(max_cost * (length - soft_max))
+
+ # Just forbid any sequence of true variables with length hard_max + 1
+ for start in range(len(works) - hard_max):
+ model.add_bool_or([~works[i] for i in range(start, start + hard_max + 1)])
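+  # Every window of hard_max + 1 consecutive variables must contain at least
+  # one false variable, which caps any run of true variables at hard_max.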
+ return cost_literals, cost_coefficients
def add_soft_sum_constraint(
@@ -149,309 +149,312 @@ def add_soft_sum_constraint(
max_cost: int,
prefix: str,
) -> tuple[list[cp_model.IntVar], list[int]]:
- """sum constraint with soft and hard bounds.
-
- This constraint counts the variables assigned to true from works.
- If forbids sum < hard_min or > hard_max.
- Then it creates penalty terms if the sum is < soft_min or > soft_max.
-
- Args:
- model: the sequence constraint is built on this model.
- works: a list of Boolean variables.
- hard_min: any sequence of true variables must have a sum of at least
- hard_min.
- soft_min: any sequence should have a sum of at least soft_min, or a linear
- penalty on the delta will be added to the objective.
- min_cost: the coefficient of the linear penalty if the sum is less than
- soft_min.
- soft_max: any sequence should have a sum of at most soft_max, or a linear
- penalty on the delta will be added to the objective.
- hard_max: any sequence of true variables must have a sum of at most
- hard_max.
- max_cost: the coefficient of the linear penalty if the sum is more than
- soft_max.
- prefix: a base name for penalty variables.
-
- Returns:
- a tuple (variables_list, coefficient_list) containing the different
- penalties created by the sequence constraint.
- """
- cost_variables = []
- cost_coefficients = []
- sum_var = model.new_int_var(hard_min, hard_max, "")
- # This adds the hard constraints on the sum.
- model.add(sum_var == sum(works))
-
- # Penalize sums below the soft_min target.
- if soft_min > hard_min and min_cost > 0:
- delta = model.new_int_var(-len(works), len(works), "")
- model.add(delta == soft_min - sum_var)
- # TODO(user): Compare efficiency with only excess >= soft_min - sum_var.
- excess = model.new_int_var(0, 7, prefix + ": under_sum")
- model.add_max_equality(excess, [delta, 0])
- cost_variables.append(excess)
- cost_coefficients.append(min_cost)
-
- # Penalize sums above the soft_max target.
- if soft_max < hard_max and max_cost > 0:
- delta = model.new_int_var(-7, 7, "")
- model.add(delta == sum_var - soft_max)
- excess = model.new_int_var(0, 7, prefix + ": over_sum")
- model.add_max_equality(excess, [delta, 0])
- cost_variables.append(excess)
- cost_coefficients.append(max_cost)
-
- return cost_variables, cost_coefficients
+  """Sum constraint with soft and hard bounds.
+
+  This constraint counts the variables assigned to true in works.
+  It forbids sums < hard_min or > hard_max.
+ Then it creates penalty terms if the sum is < soft_min or > soft_max.
+
+ Args:
+ model: the sequence constraint is built on this model.
+ works: a list of Boolean variables.
+ hard_min: any sequence of true variables must have a sum of at least
+ hard_min.
+ soft_min: any sequence should have a sum of at least soft_min, or a linear
+ penalty on the delta will be added to the objective.
+ min_cost: the coefficient of the linear penalty if the sum is less than
+ soft_min.
+ soft_max: any sequence should have a sum of at most soft_max, or a linear
+ penalty on the delta will be added to the objective.
+ hard_max: any sequence of true variables must have a sum of at most
+ hard_max.
+ max_cost: the coefficient of the linear penalty if the sum is more than
+ soft_max.
+ prefix: a base name for penalty variables.
+
+ Returns:
+ a tuple (variables_list, coefficient_list) containing the different
+ penalties created by the sequence constraint.
+ """
+ cost_variables = []
+ cost_coefficients = []
+ sum_var = model.new_int_var(hard_min, hard_max, "")
+ # This adds the hard constraints on the sum.
+ model.add(sum_var == sum(works))
+
+ # Penalize sums below the soft_min target.
+ if soft_min > hard_min and min_cost > 0:
+ delta = model.new_int_var(-len(works), len(works), "")
+ model.add(delta == soft_min - sum_var)
+ # TODO(user): Compare efficiency with only excess >= soft_min - sum_var.
+ excess = model.new_int_var(0, 7, prefix + ": under_sum")
+ model.add_max_equality(excess, [delta, 0])
+ cost_variables.append(excess)
+ cost_coefficients.append(min_cost)
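+    # excess == max(0, soft_min - sum_var): the penalty grows linearly with
+    # the shortfall below soft_min and vanishes once the soft target is met.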
+
+ # Penalize sums above the soft_max target.
+ if soft_max < hard_max and max_cost > 0:
+ delta = model.new_int_var(-7, 7, "")
+ model.add(delta == sum_var - soft_max)
+ excess = model.new_int_var(0, 7, prefix + ": over_sum")
+ model.add_max_equality(excess, [delta, 0])
+ cost_variables.append(excess)
+ cost_coefficients.append(max_cost)
+
+ return cost_variables, cost_coefficients
def solve_shift_scheduling(params: str, output_proto: str):
- """Solves the shift scheduling problem."""
- # Data
- num_employees = 8
- num_weeks = 3
- shifts = ["O", "M", "A", "N"]
-
- # Fixed assignment: (employee, shift, day).
- # This fixes the first 2 days of the schedule.
- fixed_assignments = [
- (0, 0, 0),
- (1, 0, 0),
- (2, 1, 0),
- (3, 1, 0),
- (4, 2, 0),
- (5, 2, 0),
- (6, 2, 3),
- (7, 3, 0),
- (0, 1, 1),
- (1, 1, 1),
- (2, 2, 1),
- (3, 2, 1),
- (4, 2, 1),
- (5, 0, 1),
- (6, 0, 1),
- (7, 3, 1),
- ]
-
- # Request: (employee, shift, day, weight)
- # A negative weight indicates that the employee desire this assignment.
- requests = [
- # Employee 3 does not want to work on the first Saturday (negative weight
- # for the Off shift).
- (3, 0, 5, -2),
- # Employee 5 wants a night shift on the second Thursday (negative weight).
- (5, 3, 10, -2),
- # Employee 2 does not want a night shift on the first Friday (positive
- # weight).
- (2, 3, 4, 4),
- ]
-
- # Shift constraints on continuous sequence :
- # (shift, hard_min, soft_min, min_penalty,
- # soft_max, hard_max, max_penalty)
- shift_constraints = [
- # One or two consecutive days of rest, this is a hard constraint.
- (0, 1, 1, 0, 2, 2, 0),
- # between 2 and 3 consecutive days of night shifts, 1 and 4 are
- # possible but penalized.
- (3, 1, 2, 20, 3, 4, 5),
- ]
-
- # Weekly sum constraints on shifts days:
- # (shift, hard_min, soft_min, min_penalty,
- # soft_max, hard_max, max_penalty)
- weekly_sum_constraints = [
- # Constraints on rests per week.
- (0, 1, 2, 7, 2, 3, 4),
- # At least 1 night shift per week (penalized). At most 4 (hard).
- (3, 0, 1, 3, 4, 4, 0),
- ]
-
- # Penalized transitions:
- # (previous_shift, next_shift, penalty (0 means forbidden))
- penalized_transitions = [
- # Afternoon to night has a penalty of 4.
- (2, 3, 4),
- # Night to morning is forbidden.
- (3, 1, 0),
- ]
-
- # daily demands for work shifts (morning, afternoon, night) for each day
- # of the week starting on Monday.
- weekly_cover_demands = [
- (2, 3, 1), # Monday
- (2, 3, 1), # Tuesday
- (2, 2, 2), # Wednesday
- (2, 3, 1), # Thursday
- (2, 2, 2), # Friday
- (1, 2, 3), # Saturday
- (1, 3, 1), # Sunday
- ]
-
- # Penalty for exceeding the cover constraint per shift type.
- excess_cover_penalties = (2, 2, 5)
-
- num_days = num_weeks * 7
- num_shifts = len(shifts)
-
- model = cp_model.CpModel()
-
- work = {}
+ """Solves the shift scheduling problem."""
+ # Data
+ num_employees = 8
+ num_weeks = 3
+ shifts = ["O", "M", "A", "N"]
+
+ # Fixed assignment: (employee, shift, day).
+ # This fixes the first 2 days of the schedule.
+ fixed_assignments = [
+ (0, 0, 0),
+ (1, 0, 0),
+ (2, 1, 0),
+ (3, 1, 0),
+ (4, 2, 0),
+ (5, 2, 0),
+ (6, 2, 3),
+ (7, 3, 0),
+ (0, 1, 1),
+ (1, 1, 1),
+ (2, 2, 1),
+ (3, 2, 1),
+ (4, 2, 1),
+ (5, 0, 1),
+ (6, 0, 1),
+ (7, 3, 1),
+ ]
+
+ # Request: (employee, shift, day, weight)
+  # A negative weight indicates that the employee desires this assignment.
+ requests = [
+ # Employee 3 does not want to work on the first Saturday (negative weight
+ # for the Off shift).
+ (3, 0, 5, -2),
+ # Employee 5 wants a night shift on the second Thursday (negative weight).
+ (5, 3, 10, -2),
+ # Employee 2 does not want a night shift on the first Friday (positive
+ # weight).
+ (2, 3, 4, 4),
+ ]
+
+  # Shift constraints on continuous sequences:
+ # (shift, hard_min, soft_min, min_penalty,
+ # soft_max, hard_max, max_penalty)
+ shift_constraints = [
+      # One or two consecutive days of rest; this is a hard constraint.
+ (0, 1, 1, 0, 2, 2, 0),
+      # Between 2 and 3 consecutive days of night shifts; 1 and 4 are
+      # possible but penalized.
+ (3, 1, 2, 20, 3, 4, 5),
+ ]
+
+  # Weekly sum constraints on shift days:
+ # (shift, hard_min, soft_min, min_penalty,
+ # soft_max, hard_max, max_penalty)
+ weekly_sum_constraints = [
+ # Constraints on rests per week.
+ (0, 1, 2, 7, 2, 3, 4),
+ # At least 1 night shift per week (penalized). At most 4 (hard).
+ (3, 0, 1, 3, 4, 4, 0),
+ ]
+
+ # Penalized transitions:
+ # (previous_shift, next_shift, penalty (0 means forbidden))
+ penalized_transitions = [
+ # Afternoon to night has a penalty of 4.
+ (2, 3, 4),
+ # Night to morning is forbidden.
+ (3, 1, 0),
+ ]
+
+ # daily demands for work shifts (morning, afternoon, night) for each day
+ # of the week starting on Monday.
+ weekly_cover_demands = [
+ (2, 3, 1), # Monday
+ (2, 3, 1), # Tuesday
+ (2, 2, 2), # Wednesday
+ (2, 3, 1), # Thursday
+ (2, 2, 2), # Friday
+ (1, 2, 3), # Saturday
+ (1, 3, 1), # Sunday
+ ]
+
+ # Penalty for exceeding the cover constraint per shift type.
+ excess_cover_penalties = (2, 2, 5)
+
+ num_days = num_weeks * 7
+ num_shifts = len(shifts)
+
+ model = cp_model.CpModel()
+
+ work = {}
+ for e in range(num_employees):
+ for s in range(num_shifts):
+ for d in range(num_days):
+ work[e, s, d] = model.new_bool_var(f"work{e}_{s}_{d}")
+
+ # Linear terms of the objective in a minimization context.
+ obj_int_vars: list[cp_model.IntVar] = []
+ obj_int_coeffs: list[int] = []
+ obj_bool_vars: list[cp_model.BoolVarT] = []
+ obj_bool_coeffs: list[int] = []
+
+ # Exactly one shift per day.
+ for e in range(num_employees):
+ for d in range(num_days):
+ model.add_exactly_one(work[e, s, d] for s in range(num_shifts))
+
+ # Fixed assignments.
+ for e, s, d in fixed_assignments:
+ model.add(work[e, s, d] == 1)
+
+ # Employee requests
+ for e, s, d, w in requests:
+ obj_bool_vars.append(work[e, s, d])
+ obj_bool_coeffs.append(w)
+
+ # Shift constraints
+ for ct in shift_constraints:
+ shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
for e in range(num_employees):
- for s in range(num_shifts):
- for d in range(num_days):
- work[e, s, d] = model.new_bool_var(f"work{e}_{s}_{d}")
-
- # Linear terms of the objective in a minimization context.
- obj_int_vars: list[cp_model.IntVar] = []
- obj_int_coeffs: list[int] = []
- obj_bool_vars: list[cp_model.BoolVarT] = []
- obj_bool_coeffs: list[int] = []
-
- # Exactly one shift per day.
+ works = [work[e, shift, d] for d in range(num_days)]
+ variables, coeffs = add_soft_sequence_constraint(
+ model,
+ works,
+ hard_min,
+ soft_min,
+ min_cost,
+ soft_max,
+ hard_max,
+ max_cost,
+ f"shift_constraint(employee {e}, shift {shift})",
+ )
+ obj_bool_vars.extend(variables)
+ obj_bool_coeffs.extend(coeffs)
+
+ # Weekly sum constraints
+ for ct in weekly_sum_constraints:
+ shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
for e in range(num_employees):
- for d in range(num_days):
- model.add_exactly_one(work[e, s, d] for s in range(num_shifts))
-
- # Fixed assignments.
- for e, s, d in fixed_assignments:
- model.add(work[e, s, d] == 1)
-
- # Employee requests
- for e, s, d, w in requests:
- obj_bool_vars.append(work[e, s, d])
- obj_bool_coeffs.append(w)
-
- # Shift constraints
- for ct in shift_constraints:
- shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
- for e in range(num_employees):
- works = [work[e, shift, d] for d in range(num_days)]
- variables, coeffs = add_soft_sequence_constraint(
- model,
- works,
- hard_min,
- soft_min,
- min_cost,
- soft_max,
- hard_max,
- max_cost,
- f"shift_constraint(employee {e}, shift {shift})",
- )
- obj_bool_vars.extend(variables)
- obj_bool_coeffs.extend(coeffs)
-
- # Weekly sum constraints
- for ct in weekly_sum_constraints:
- shift, hard_min, soft_min, min_cost, soft_max, hard_max, max_cost = ct
- for e in range(num_employees):
- for w in range(num_weeks):
- works = [work[e, shift, d + w * 7] for d in range(7)]
- variables, coeffs = add_soft_sum_constraint(
- model,
- works,
- hard_min,
- soft_min,
- min_cost,
- soft_max,
- hard_max,
- max_cost,
- f"weekly_sum_constraint(employee {e}, shift {shift}, week {w})",
- )
- obj_int_vars.extend(variables)
- obj_int_coeffs.extend(coeffs)
-
- # Penalized transitions
- for previous_shift, next_shift, cost in penalized_transitions:
- for e in range(num_employees):
- for d in range(num_days - 1):
- transition = [
- ~work[e, previous_shift, d],
- ~work[e, next_shift, d + 1],
- ]
- if cost == 0:
- model.add_bool_or(transition)
- else:
- trans_var = model.new_bool_var(
- f"transition (employee={e}, day={d})"
- )
- transition.append(trans_var)
- model.add_bool_or(transition)
- obj_bool_vars.append(trans_var)
- obj_bool_coeffs.append(cost)
-
- # Cover constraints
- for s in range(1, num_shifts):
- for w in range(num_weeks):
- for d in range(7):
- works = [work[e, s, w * 7 + d] for e in range(num_employees)]
- # Ignore Off shift.
- min_demand = weekly_cover_demands[d][s - 1]
- worked = model.new_int_var(min_demand, num_employees, "")
- model.add(worked == sum(works))
- over_penalty = excess_cover_penalties[s - 1]
- if over_penalty > 0:
- name = f"excess_demand(shift={s}, week={w}, day={d})"
- excess = model.new_int_var(0, num_employees - min_demand, name)
- model.add(excess == worked - min_demand)
- obj_int_vars.append(excess)
- obj_int_coeffs.append(over_penalty)
-
- # Objective
- model.minimize(
- sum(obj_bool_vars[i] * obj_bool_coeffs[i] for i in range(len(obj_bool_vars)))
- + sum(obj_int_vars[i] * obj_int_coeffs[i] for i in range(len(obj_int_vars)))
- )
-
- if output_proto:
- print(f"Writing proto to {output_proto}")
- with open(output_proto, "w") as text_file:
- text_file.write(str(model))
-
- # Solve the model.
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- solution_printer = cp_model.ObjectiveSolutionPrinter()
- status = solver.solve(model, solution_printer)
-
- # Print solution.
- if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
- print()
- header = " "
- for w in range(num_weeks):
- header += "M T W T F S S "
- print(header)
- for e in range(num_employees):
- schedule = ""
- for d in range(num_days):
- for s in range(num_shifts):
- if solver.boolean_value(work[e, s, d]):
- schedule += shifts[s] + " "
- print(f"worker {e}: {schedule}")
- print()
- print("Penalties:")
- for i, var in enumerate(obj_bool_vars):
- if solver.boolean_value(var):
- penalty = obj_bool_coeffs[i]
- if penalty > 0:
- print(f" {var.name} violated, penalty={penalty}")
- else:
- print(f" {var.name} fulfilled, gain={-penalty}")
-
- for i, var in enumerate(obj_int_vars):
- if solver.value(var) > 0:
- print(
- f" {var.name} violated by {solver.value(var)}, linear"
- f" penalty={obj_int_coeffs[i]}"
- )
-
+ for w in range(num_weeks):
+ works = [work[e, shift, d + w * 7] for d in range(7)]
+ variables, coeffs = add_soft_sum_constraint(
+ model,
+ works,
+ hard_min,
+ soft_min,
+ min_cost,
+ soft_max,
+ hard_max,
+ max_cost,
+ f"weekly_sum_constraint(employee {e}, shift {shift}, week {w})",
+ )
+ obj_int_vars.extend(variables)
+ obj_int_coeffs.extend(coeffs)
+
+ # Penalized transitions
+ for previous_shift, next_shift, cost in penalized_transitions:
+ for e in range(num_employees):
+ for d in range(num_days - 1):
+ transition = [
+ ~work[e, previous_shift, d],
+ ~work[e, next_shift, d + 1],
+ ]
+ if cost == 0:
+ model.add_bool_or(transition)
+ else:
+ trans_var = model.new_bool_var(f"transition (employee={e}, day={d})")
+ transition.append(trans_var)
+ model.add_bool_or(transition)
+ obj_bool_vars.append(trans_var)
+ obj_bool_coeffs.append(cost)
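+          # The clause can only be satisfied without trans_var when the
+          # transition does not occur; otherwise trans_var must be true and
+          # its cost is added to the objective.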
+
+ # Cover constraints
+ for s in range(1, num_shifts):
+ for w in range(num_weeks):
+ for d in range(7):
+ works = [work[e, s, w * 7 + d] for e in range(num_employees)]
+ # Ignore Off shift.
+ min_demand = weekly_cover_demands[d][s - 1]
+ worked = model.new_int_var(min_demand, num_employees, "")
+ model.add(worked == sum(works))
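+        # The lower bound min_demand on worked enforces the hard cover
+        # requirement; only the excess above it is penalized below.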
+ over_penalty = excess_cover_penalties[s - 1]
+ if over_penalty > 0:
+ name = f"excess_demand(shift={s}, week={w}, day={d})"
+ excess = model.new_int_var(0, num_employees - min_demand, name)
+ model.add(excess == worked - min_demand)
+ obj_int_vars.append(excess)
+ obj_int_coeffs.append(over_penalty)
+
+ # Objective
+ model.minimize(
+ sum(
+ obj_bool_vars[i] * obj_bool_coeffs[i]
+ for i in range(len(obj_bool_vars))
+ )
+ + sum(
+ obj_int_vars[i] * obj_int_coeffs[i] for i in range(len(obj_int_vars))
+ )
+ )
+
+ if output_proto:
+ print(f"Writing proto to {output_proto}")
+ with open(output_proto, "w") as text_file:
+ text_file.write(str(model))
+
+ # Solve the model.
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ solution_printer = cp_model.ObjectiveSolutionPrinter()
+ status = solver.solve(model, solution_printer)
+
+ # Print solution.
+ if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
+ print()
+ header = " "
+ for w in range(num_weeks):
+ header += "M T W T F S S "
+ print(header)
+ for e in range(num_employees):
+ schedule = ""
+ for d in range(num_days):
+ for s in range(num_shifts):
+ if solver.boolean_value(work[e, s, d]):
+ schedule += shifts[s] + " "
+ print(f"worker {e}: {schedule}")
print()
- print(solver.response_stats())
+ print("Penalties:")
+ for i, var in enumerate(obj_bool_vars):
+ if solver.boolean_value(var):
+ penalty = obj_bool_coeffs[i]
+ if penalty > 0:
+ print(f" {var.name} violated, penalty={penalty}")
+ else:
+ print(f" {var.name} fulfilled, gain={-penalty}")
+
+ for i, var in enumerate(obj_int_vars):
+ if solver.value(var) > 0:
+ print(
+ f" {var.name} violated by {solver.value(var)}, linear"
+ f" penalty={obj_int_coeffs[i]}"
+ )
+
+ print()
+ print(solver.response_stats())
def main(_):
- solve_shift_scheduling(_PARAMS.value, _OUTPUT_PROTO.value)
+ solve_shift_scheduling(_PARAMS.value, _OUTPUT_PROTO.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/single_machine_scheduling_with_setup_release_due_dates_sat.py b/examples/python/single_machine_scheduling_with_setup_release_due_dates_sat.py
index c54a67d26aa..511744c323a 100644
--- a/examples/python/single_machine_scheduling_with_setup_release_due_dates_sat.py
+++ b/examples/python/single_machine_scheduling_with_setup_release_due_dates_sat.py
@@ -38,482 +38,485 @@
# ----------------------------------------------------------------------------
# Intermediate solution printer
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
+ """Print intermediate solutions."""
- def __init__(self) -> None:
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
+ def __init__(self) -> None:
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
- def on_solution_callback(self) -> None:
- """Called at each new solution."""
- print(
- f"Solution {self.__solution_count}, time = {self.wall_time} s,"
- f" objective = {self.objective_value}"
- )
-
-
-def single_machine_scheduling():
- """Solves a complex single machine jobshop scheduling problem."""
-
- parameters = _PARAMS.value
- output_proto_file = _OUTPUT_PROTO.value
-
- # ----------------------------------------------------------------------------
- # Data.
-
- job_durations = [
- 2546,
- 8589,
- 5953,
- 3710,
- 3630,
- 3016,
- 4148,
- 8706,
- 1604,
- 5502,
- 9983,
- 6209,
- 9920,
- 7860,
- 2176,
- ]
-
- setup_times = [
- [
- 3559,
- 1638,
- 2000,
- 3676,
- 2741,
- 2439,
- 2406,
- 1526,
- 1600,
- 3356,
- 4324,
- 1923,
- 3663,
- 4103,
- 2215,
- ],
- [
- 1442,
- 3010,
- 1641,
- 4490,
- 2060,
- 2143,
- 3376,
- 3891,
- 3513,
- 2855,
- 2653,
- 1471,
- 2257,
- 1186,
- 2354,
- ],
- [
- 1728,
- 3583,
- 3243,
- 4080,
- 2191,
- 3644,
- 4023,
- 3510,
- 2135,
- 1346,
- 1410,
- 3565,
- 3181,
- 1126,
- 4169,
- ],
- [
- 1291,
- 1703,
- 3103,
- 4001,
- 1712,
- 1137,
- 3341,
- 3485,
- 2557,
- 2435,
- 1972,
- 1986,
- 1522,
- 4734,
- 2520,
- ],
- [
- 4134,
- 2200,
- 1502,
- 3995,
- 1277,
- 1808,
- 1020,
- 2078,
- 2999,
- 1605,
- 1697,
- 2323,
- 2268,
- 2288,
- 4856,
- ],
- [
- 4974,
- 2480,
- 2492,
- 4088,
- 2587,
- 4652,
- 1478,
- 3942,
- 1222,
- 3305,
- 1206,
- 1024,
- 2605,
- 3080,
- 3516,
- ],
- [
- 1903,
- 2584,
- 2104,
- 1609,
- 4745,
- 2691,
- 1539,
- 2544,
- 2499,
- 2074,
- 4793,
- 1756,
- 2190,
- 1298,
- 2605,
- ],
- [
- 1407,
- 2536,
- 2296,
- 1769,
- 1449,
- 3386,
- 3046,
- 1180,
- 4132,
- 4783,
- 3386,
- 3429,
- 2450,
- 3376,
- 3719,
- ],
- [
- 3026,
- 1637,
- 3628,
- 3096,
- 1498,
- 4947,
- 1912,
- 3703,
- 4107,
- 4730,
- 1805,
- 2189,
- 1789,
- 1985,
- 3586,
- ],
- [
- 3940,
- 1342,
- 1601,
- 2737,
- 1748,
- 3771,
- 4052,
- 1619,
- 2558,
- 3782,
- 4383,
- 3451,
- 4904,
- 1108,
- 1750,
- ],
- [
- 1348,
- 3162,
- 1507,
- 3936,
- 1453,
- 2953,
- 4182,
- 2968,
- 3134,
- 1042,
- 3175,
- 2805,
- 4901,
- 1735,
- 1654,
- ],
- [
- 1099,
- 1711,
- 1245,
- 1067,
- 4343,
- 3407,
- 1108,
- 1784,
- 4803,
- 2342,
- 3377,
- 2037,
- 3563,
- 1621,
- 2840,
- ],
- [
- 2573,
- 4222,
- 3164,
- 2563,
- 3231,
- 4731,
- 2395,
- 1033,
- 4795,
- 3288,
- 2335,
- 4935,
- 4066,
- 1440,
- 4979,
- ],
- [
- 3321,
- 1666,
- 3573,
- 2377,
- 4649,
- 4600,
- 1065,
- 2475,
- 3658,
- 3374,
- 1138,
- 4367,
- 4728,
- 3032,
- 2198,
- ],
- [
- 2986,
- 1180,
- 4095,
- 3132,
- 3987,
- 3880,
- 3526,
- 1460,
- 4885,
- 3827,
- 4945,
- 4419,
- 3486,
- 3805,
- 3804,
- ],
- [
- 4163,
- 3441,
- 1217,
- 2941,
- 1210,
- 3794,
- 1779,
- 1904,
- 4255,
- 4967,
- 4003,
- 3873,
- 1002,
- 2055,
- 4295,
- ],
- ]
-
- due_dates = [
- -1,
- -1,
- 28569,
- -1,
- 98104,
- 27644,
- 55274,
- 57364,
- -1,
- -1,
- 60875,
- 96637,
- 77888,
- -1,
- -1,
- ]
- release_dates = [0, 0, 0, 0, 19380, 0, 0, 48657, 0, 27932, 0, 0, 24876, 0, 0]
-
- precedences = [(0, 2), (1, 2)]
-
- # ----------------------------------------------------------------------------
- # Helper data.
- num_jobs = len(job_durations)
- all_jobs = range(num_jobs)
-
- # ----------------------------------------------------------------------------
- # Preprocess.
- if _PREPROCESS.value:
- for job_id in all_jobs:
- min_incoming_setup = min(
- setup_times[j][job_id] for j in range(num_jobs + 1)
- )
- if release_dates[job_id] != 0:
- min_incoming_setup = min(min_incoming_setup, release_dates[job_id])
- if min_incoming_setup == 0:
- continue
-
- print(f"job {job_id} has a min incoming setup of {min_incoming_setup}")
- # We can transfer some setup times to the duration of the job.
- job_durations[job_id] += min_incoming_setup
- # Decrease corresponding incoming setup times.
- for j in range(num_jobs + 1):
- setup_times[j][job_id] -= min_incoming_setup
- # Adjust release dates if needed.
- if release_dates[job_id] != 0:
- release_dates[job_id] -= min_incoming_setup
-
- # ----------------------------------------------------------------------------
- # Model.
- model = cp_model.CpModel()
-
- # ----------------------------------------------------------------------------
- # Compute a maximum makespan greedily.
- horizon = sum(job_durations) + sum(
- max(setup_times[i][j] for i in range(num_jobs + 1)) for j in range(num_jobs)
+ def on_solution_callback(self) -> None:
+ """Called at each new solution."""
+ print(
+ f"Solution {self.__solution_count}, time = {self.wall_time} s,"
+ f" objective = {self.objective_value}"
)
- print(f"Greedy horizon = {horizon}")
- # ----------------------------------------------------------------------------
- # Global storage of variables.
- intervals = []
- starts = []
- ends = []
- # ----------------------------------------------------------------------------
- # Scan the jobs and create the relevant variables and intervals.
+def single_machine_scheduling():
+ """Solves a complex single machine jobshop scheduling problem."""
+
+ parameters = _PARAMS.value
+ output_proto_file = _OUTPUT_PROTO.value
+
+ # ----------------------------------------------------------------------------
+ # Data.
+
+ job_durations = [
+ 2546,
+ 8589,
+ 5953,
+ 3710,
+ 3630,
+ 3016,
+ 4148,
+ 8706,
+ 1604,
+ 5502,
+ 9983,
+ 6209,
+ 9920,
+ 7860,
+ 2176,
+ ]
+
+ setup_times = [
+ [
+ 3559,
+ 1638,
+ 2000,
+ 3676,
+ 2741,
+ 2439,
+ 2406,
+ 1526,
+ 1600,
+ 3356,
+ 4324,
+ 1923,
+ 3663,
+ 4103,
+ 2215,
+ ],
+ [
+ 1442,
+ 3010,
+ 1641,
+ 4490,
+ 2060,
+ 2143,
+ 3376,
+ 3891,
+ 3513,
+ 2855,
+ 2653,
+ 1471,
+ 2257,
+ 1186,
+ 2354,
+ ],
+ [
+ 1728,
+ 3583,
+ 3243,
+ 4080,
+ 2191,
+ 3644,
+ 4023,
+ 3510,
+ 2135,
+ 1346,
+ 1410,
+ 3565,
+ 3181,
+ 1126,
+ 4169,
+ ],
+ [
+ 1291,
+ 1703,
+ 3103,
+ 4001,
+ 1712,
+ 1137,
+ 3341,
+ 3485,
+ 2557,
+ 2435,
+ 1972,
+ 1986,
+ 1522,
+ 4734,
+ 2520,
+ ],
+ [
+ 4134,
+ 2200,
+ 1502,
+ 3995,
+ 1277,
+ 1808,
+ 1020,
+ 2078,
+ 2999,
+ 1605,
+ 1697,
+ 2323,
+ 2268,
+ 2288,
+ 4856,
+ ],
+ [
+ 4974,
+ 2480,
+ 2492,
+ 4088,
+ 2587,
+ 4652,
+ 1478,
+ 3942,
+ 1222,
+ 3305,
+ 1206,
+ 1024,
+ 2605,
+ 3080,
+ 3516,
+ ],
+ [
+ 1903,
+ 2584,
+ 2104,
+ 1609,
+ 4745,
+ 2691,
+ 1539,
+ 2544,
+ 2499,
+ 2074,
+ 4793,
+ 1756,
+ 2190,
+ 1298,
+ 2605,
+ ],
+ [
+ 1407,
+ 2536,
+ 2296,
+ 1769,
+ 1449,
+ 3386,
+ 3046,
+ 1180,
+ 4132,
+ 4783,
+ 3386,
+ 3429,
+ 2450,
+ 3376,
+ 3719,
+ ],
+ [
+ 3026,
+ 1637,
+ 3628,
+ 3096,
+ 1498,
+ 4947,
+ 1912,
+ 3703,
+ 4107,
+ 4730,
+ 1805,
+ 2189,
+ 1789,
+ 1985,
+ 3586,
+ ],
+ [
+ 3940,
+ 1342,
+ 1601,
+ 2737,
+ 1748,
+ 3771,
+ 4052,
+ 1619,
+ 2558,
+ 3782,
+ 4383,
+ 3451,
+ 4904,
+ 1108,
+ 1750,
+ ],
+ [
+ 1348,
+ 3162,
+ 1507,
+ 3936,
+ 1453,
+ 2953,
+ 4182,
+ 2968,
+ 3134,
+ 1042,
+ 3175,
+ 2805,
+ 4901,
+ 1735,
+ 1654,
+ ],
+ [
+ 1099,
+ 1711,
+ 1245,
+ 1067,
+ 4343,
+ 3407,
+ 1108,
+ 1784,
+ 4803,
+ 2342,
+ 3377,
+ 2037,
+ 3563,
+ 1621,
+ 2840,
+ ],
+ [
+ 2573,
+ 4222,
+ 3164,
+ 2563,
+ 3231,
+ 4731,
+ 2395,
+ 1033,
+ 4795,
+ 3288,
+ 2335,
+ 4935,
+ 4066,
+ 1440,
+ 4979,
+ ],
+ [
+ 3321,
+ 1666,
+ 3573,
+ 2377,
+ 4649,
+ 4600,
+ 1065,
+ 2475,
+ 3658,
+ 3374,
+ 1138,
+ 4367,
+ 4728,
+ 3032,
+ 2198,
+ ],
+ [
+ 2986,
+ 1180,
+ 4095,
+ 3132,
+ 3987,
+ 3880,
+ 3526,
+ 1460,
+ 4885,
+ 3827,
+ 4945,
+ 4419,
+ 3486,
+ 3805,
+ 3804,
+ ],
+ [
+ 4163,
+ 3441,
+ 1217,
+ 2941,
+ 1210,
+ 3794,
+ 1779,
+ 1904,
+ 4255,
+ 4967,
+ 4003,
+ 3873,
+ 1002,
+ 2055,
+ 4295,
+ ],
+ ]
+
+ due_dates = [
+ -1,
+ -1,
+ 28569,
+ -1,
+ 98104,
+ 27644,
+ 55274,
+ 57364,
+ -1,
+ -1,
+ 60875,
+ 96637,
+ 77888,
+ -1,
+ -1,
+ ]
+ release_dates = [0, 0, 0, 0, 19380, 0, 0, 48657, 0, 27932, 0, 0, 24876, 0, 0]
+
+ precedences = [(0, 2), (1, 2)]
+
+ # ----------------------------------------------------------------------------
+ # Helper data.
+ num_jobs = len(job_durations)
+ all_jobs = range(num_jobs)
+
+ # ----------------------------------------------------------------------------
+ # Preprocess.
+ if _PREPROCESS.value:
for job_id in all_jobs:
- duration = job_durations[job_id]
- release_date = release_dates[job_id]
- due_date = due_dates[job_id] if due_dates[job_id] != -1 else horizon
- print(
- f"job {job_id:2}: start = {release_date:5}, duration = {duration:4},"
- f" end = {due_date:6}"
+ min_incoming_setup = min(
+ setup_times[j][job_id] for j in range(num_jobs + 1)
+ )
+ if release_dates[job_id] != 0:
+ min_incoming_setup = min(min_incoming_setup, release_dates[job_id])
+ if min_incoming_setup == 0:
+ continue
+
+ print(f"job {job_id} has a min incoming setup of {min_incoming_setup}")
+ # We can transfer some setup times to the duration of the job.
+ job_durations[job_id] += min_incoming_setup
+ # Decrease corresponding incoming setup times.
+ for j in range(num_jobs + 1):
+ setup_times[j][job_id] -= min_incoming_setup
+ # Adjust release dates if needed.
+ if release_dates[job_id] != 0:
+ release_dates[job_id] -= min_incoming_setup
+
+ # ----------------------------------------------------------------------------
+ # Model.
+ model = cp_model.CpModel()
+
+ # ----------------------------------------------------------------------------
+ # Compute a maximum makespan greedily.
+ horizon = sum(job_durations) + sum(
+ max(setup_times[i][j] for i in range(num_jobs + 1))
+ for j in range(num_jobs)
+ )
+ print(f"Greedy horizon = {horizon}")
+
+ # ----------------------------------------------------------------------------
+ # Global storage of variables.
+ intervals = []
+ starts = []
+ ends = []
+
+ # ----------------------------------------------------------------------------
+ # Scan the jobs and create the relevant variables and intervals.
+ for job_id in all_jobs:
+ duration = job_durations[job_id]
+ release_date = release_dates[job_id]
+ due_date = due_dates[job_id] if due_dates[job_id] != -1 else horizon
+ print(
+ f"job {job_id:2}: start = {release_date:5}, duration = {duration:4},"
+ f" end = {due_date:6}"
+ )
+ name_suffix = f"_{job_id}"
+ start = model.new_int_var(release_date, due_date, "s" + name_suffix)
+ end = model.new_int_var(release_date, due_date, "e" + name_suffix)
+ interval = model.new_interval_var(start, duration, end, "i" + name_suffix)
+ starts.append(start)
+ ends.append(end)
+ intervals.append(interval)
+
+ # No overlap constraint.
+ model.add_no_overlap(intervals)
+
+ # ----------------------------------------------------------------------------
+ # Transition times using a circuit constraint.
+ arcs = []
+ for i in all_jobs:
+ # Initial arc from the dummy node (0) to a task.
+ start_lit = model.new_bool_var("")
+ arcs.append((0, i + 1, start_lit))
+    # If this task is the first, fix its start to its minimum starting time.
+ min_start_time = max(release_dates[i], setup_times[0][i])
+ model.add(starts[i] == min_start_time).only_enforce_if(start_lit)
+    # Final arc from a task to the dummy node.
+ arcs.append((i + 1, 0, model.new_bool_var("")))
+
+ for j in all_jobs:
+ if i == j:
+ continue
+
+ lit = model.new_bool_var(f"{j} follows {i}")
+ arcs.append((i + 1, j + 1, lit))
+
+ # We add the reified precedence to link the literal with the times of the
+ # two tasks.
+      # If release_dates[j] == 0, we can strengthen this precedence into an
+ # equality as we are minimizing the makespan.
+ if release_dates[j] == 0:
+ model.add(starts[j] == ends[i] + setup_times[i + 1][j]).only_enforce_if(
+ lit
)
- name_suffix = f"_{job_id}"
- start = model.new_int_var(release_date, due_date, "s" + name_suffix)
- end = model.new_int_var(release_date, due_date, "e" + name_suffix)
- interval = model.new_interval_var(start, duration, end, "i" + name_suffix)
- starts.append(start)
- ends.append(end)
- intervals.append(interval)
-
- # No overlap constraint.
- model.add_no_overlap(intervals)
-
- # ----------------------------------------------------------------------------
- # Transition times using a circuit constraint.
- arcs = []
- for i in all_jobs:
- # Initial arc from the dummy node (0) to a task.
- start_lit = model.new_bool_var("")
- arcs.append((0, i + 1, start_lit))
- # If this task is the first, set to minimum starting time.
- min_start_time = max(release_dates[i], setup_times[0][i])
- model.add(starts[i] == min_start_time).only_enforce_if(start_lit)
- # Final arc from an arc to the dummy node.
- arcs.append((i + 1, 0, model.new_bool_var("")))
-
- for j in all_jobs:
- if i == j:
- continue
-
- lit = model.new_bool_var(f"{j} follows {i}")
- arcs.append((i + 1, j + 1, lit))
-
- # We add the reified precedence to link the literal with the times of the
- # two tasks.
- # If release_dates[j] == 0, we can strenghten this precedence into an
- # equality as we are minimizing the makespan.
- if release_dates[j] == 0:
- model.add(starts[j] == ends[i] + setup_times[i + 1][j]).only_enforce_if(
- lit
- )
- else:
- model.add(starts[j] >= ends[i] + setup_times[i + 1][j]).only_enforce_if(
- lit
- )
-
- model.add_circuit(arcs)
-
- # ----------------------------------------------------------------------------
- # Precedences.
- for before, after in precedences:
- print(f"job {after} is after job {before}")
- model.add(ends[before] <= starts[after])
-
- # ----------------------------------------------------------------------------
- # Objective.
- makespan = model.new_int_var(0, horizon, "makespan")
- model.add_max_equality(makespan, ends)
- model.minimize(makespan)
-
- # ----------------------------------------------------------------------------
- # Write problem to file.
- if output_proto_file:
- print(f"Writing proto to {output_proto_file}")
- with open(output_proto_file, "w") as text_file:
- text_file.write(str(model))
-
- # ----------------------------------------------------------------------------
- # Solve.
- solver = cp_model.CpSolver()
- if parameters:
- text_format.Parse(parameters, solver.parameters)
- solution_printer = SolutionPrinter()
- solver.best_bound_callback = lambda a: print(f"New objective lower bound: {a}")
- solver.solve(model, solution_printer)
- for job_id in all_jobs:
- print(
- f"job {job_id} starts at {solver.value(starts[job_id])} end ends at"
- f" {solver.value(ends[job_id])}"
+ else:
+ model.add(starts[j] >= ends[i] + setup_times[i + 1][j]).only_enforce_if(
+ lit
)
+ model.add_circuit(arcs)
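+  # The circuit constraint requires a single circuit through the dummy node 0
+  # and the job nodes i + 1, so the true arc literals describe one processing
+  # order and activate the matching transition constraints above.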
+
+ # ----------------------------------------------------------------------------
+ # Precedences.
+ for before, after in precedences:
+ print(f"job {after} is after job {before}")
+ model.add(ends[before] <= starts[after])
+
+ # ----------------------------------------------------------------------------
+ # Objective.
+ makespan = model.new_int_var(0, horizon, "makespan")
+ model.add_max_equality(makespan, ends)
+ model.minimize(makespan)
+
+ # ----------------------------------------------------------------------------
+ # Write problem to file.
+ if output_proto_file:
+ print(f"Writing proto to {output_proto_file}")
+ with open(output_proto_file, "w") as text_file:
+ text_file.write(str(model))
+
+ # ----------------------------------------------------------------------------
+ # Solve.
+ solver = cp_model.CpSolver()
+ if parameters:
+ text_format.Parse(parameters, solver.parameters)
+ solution_printer = SolutionPrinter()
+ solver.best_bound_callback = lambda a: print(
+ f"New objective lower bound: {a}"
+ )
+ solver.solve(model, solution_printer)
+ for job_id in all_jobs:
+ print(
+        f"job {job_id} starts at {solver.value(starts[job_id])} and ends at"
+ f" {solver.value(ends[job_id])}"
+ )
+
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- single_machine_scheduling()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ single_machine_scheduling()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/spread_robots_sat.py b/examples/python/spread_robots_sat.py
index 27da1d65b9e..3a67396748f 100644
--- a/examples/python/spread_robots_sat.py
+++ b/examples/python/spread_robots_sat.py
@@ -22,7 +22,9 @@
from google.protobuf import text_format
from ortools.sat.python import cp_model
-_NUM_ROBOTS = flags.DEFINE_integer("num_robots", 8, "Number of robots to place.")
+_NUM_ROBOTS = flags.DEFINE_integer(
+ "num_robots", 8, "Number of robots to place."
+)
_ROOM_SIZE = flags.DEFINE_integer(
"room_size", 20, "Size of the square room where robots are."
)
@@ -34,88 +36,88 @@
def spread_robots(num_robots: int, room_size: int, params: str) -> None:
- """Optimize robots placement."""
- model = cp_model.CpModel()
-
- # Create the list of coordinates (x, y) for each robot.
- x = [model.new_int_var(1, room_size, f"x_{i}") for i in range(num_robots)]
- y = [model.new_int_var(1, room_size, f"y_{i}") for i in range(num_robots)]
-
- # The specification of the problem is to maximize the minimum euclidian
- # distance between any two robots. Unfortunately, the euclidian distance
- # uses the square root operation which is not defined on integer variables.
- # To work around, we will create a min_square_distance variable, then we make
- # sure that its value is less than the square of the euclidian distance
- # between any two robots.
- #
- # This encoding has a low precision. To improve the precision, we will scale
- # the domain of the min_square_distance variable by a constant factor, then
- # multiply the square of the euclidian distance between two robots by the same
- # factor.
- #
- # we create a scaled_min_square_distance variable with a domain of
- # [0..scaling * max euclidian distance**2] such that
- # forall i:
- # scaled_min_square_distance <= scaling * (x_diff_sq[i] + y_diff_sq[i])
- scaling = 1000
- scaled_min_square_distance = model.new_int_var(
- 0, 2 * scaling * room_size**2, "scaled_min_square_distance"
+ """Optimize robots placement."""
+ model = cp_model.CpModel()
+
+ # Create the list of coordinates (x, y) for each robot.
+ x = [model.new_int_var(1, room_size, f"x_{i}") for i in range(num_robots)]
+ y = [model.new_int_var(1, room_size, f"y_{i}") for i in range(num_robots)]
+
+  # The specification of the problem is to maximize the minimum euclidean
+  # distance between any two robots. Unfortunately, the euclidean distance
+  # uses the square root operation, which is not defined on integer variables.
+  # To work around this, we create a min_square_distance variable, then make
+  # sure that its value is less than the square of the euclidean distance
+  # between any two robots.
+  #
+  # This encoding has a low precision. To improve the precision, we scale
+  # the domain of the min_square_distance variable by a constant factor, then
+  # multiply the square of the euclidean distance between two robots by the
+  # same factor.
+  #
+  # We create a scaled_min_square_distance variable with a domain of
+  # [0..scaling * max euclidean distance**2] such that
+  # forall i:
+  #   scaled_min_square_distance <= scaling * (x_diff_sq[i] + y_diff_sq[i])
+ scaling = 1000
+ scaled_min_square_distance = model.new_int_var(
+ 0, 2 * scaling * room_size**2, "scaled_min_square_distance"
+ )
+
+ # Build intermediate variables and get the list of squared distances on
+ # each dimension.
+ for i in range(num_robots - 1):
+ for j in range(i + 1, num_robots):
+ # Compute the distance on each dimension between robot i and robot j.
+ x_diff = model.new_int_var(-room_size, room_size, f"x_diff{i}")
+ y_diff = model.new_int_var(-room_size, room_size, f"y_diff{i}")
+ model.add(x_diff == x[i] - x[j])
+ model.add(y_diff == y[i] - y[j])
+
+ # Compute the square of the previous differences.
+ x_diff_sq = model.new_int_var(0, room_size**2, f"x_diff_sq{i}")
+ y_diff_sq = model.new_int_var(0, room_size**2, f"y_diff_sq{i}")
+ model.add_multiplication_equality(x_diff_sq, x_diff, x_diff)
+ model.add_multiplication_equality(y_diff_sq, y_diff, y_diff)
+
+      # We just need to be <= the scaled square distance, as we are
+      # maximizing the min distance, which is equivalent to maximizing the min
+      # square distance.
+ model.add(scaled_min_square_distance <= scaling * (x_diff_sq + y_diff_sq))
+
+ # Naive symmetry breaking.
+ for i in range(1, num_robots):
+ model.add(x[0] <= x[i])
+ model.add(y[0] <= y[i])
+
+ # Objective
+ model.maximize(scaled_min_square_distance)
+
+ # Creates a solver and solves the model.
+ solver = cp_model.CpSolver()
+ if params:
+ text_format.Parse(params, solver.parameters)
+ solver.parameters.log_search_progress = True
+ status = solver.solve(model)
+
+ # Prints the solution.
+ if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
+ print(
+ f"Spread {num_robots} with a min pairwise distance of"
+ f" {math.sqrt(solver.objective_value / scaling)}"
)
-
- # Build intermediate variables and get the list of squared distances on
- # each dimension.
- for i in range(num_robots - 1):
- for j in range(i + 1, num_robots):
- # Compute the distance on each dimension between robot i and robot j.
- x_diff = model.new_int_var(-room_size, room_size, f"x_diff{i}")
- y_diff = model.new_int_var(-room_size, room_size, f"y_diff{i}")
- model.add(x_diff == x[i] - x[j])
- model.add(y_diff == y[i] - y[j])
-
- # Compute the square of the previous differences.
- x_diff_sq = model.new_int_var(0, room_size**2, f"x_diff_sq{i}")
- y_diff_sq = model.new_int_var(0, room_size**2, f"y_diff_sq{i}")
- model.add_multiplication_equality(x_diff_sq, x_diff, x_diff)
- model.add_multiplication_equality(y_diff_sq, y_diff, y_diff)
-
- # We just need to be <= to the scaled square distance as we are
- # maximizing the min distance, which is equivalent as maximizing the min
- # square distance.
- model.add(scaled_min_square_distance <= scaling * (x_diff_sq + y_diff_sq))
-
- # Naive symmetry breaking.
- for i in range(1, num_robots):
- model.add(x[0] <= x[i])
- model.add(y[0] <= y[i])
-
- # Objective
- model.maximize(scaled_min_square_distance)
-
- # Creates a solver and solves the model.
- solver = cp_model.CpSolver()
- if params:
- text_format.Parse(params, solver.parameters)
- solver.parameters.log_search_progress = True
- status = solver.solve(model)
-
- # Prints the solution.
- if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
- print(
- f"Spread {num_robots} with a min pairwise distance of"
- f" {math.sqrt(solver.objective_value / scaling)}"
- )
- for i in range(num_robots):
- print(f"robot {i}: x={solver.value(x[i])} y={solver.value(y[i])}")
- else:
- print("No solution found.")
+ for i in range(num_robots):
+ print(f"robot {i}: x={solver.value(x[i])} y={solver.value(y[i])}")
+ else:
+ print("No solution found.")
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
- spread_robots(_NUM_ROBOTS.value, _ROOM_SIZE.value, _PARAMS.value)
+ spread_robots(_NUM_ROBOTS.value, _ROOM_SIZE.value, _PARAMS.value)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
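The reformatted spread_robots code above relies on two CP-SAT idioms worth isolating: squaring an integer expression with add_multiplication_equality (there is no integer square root), and scaling the objective before recovering the true distance after the solve. Below is a minimal standalone sketch of the same pattern, reduced to two points on a 0..10 segment; the names and bounds are illustrative and not taken from the example file.

from ortools.sat.python import cp_model


def max_min_separation_sketch() -> None:
    """Maximizes the squared separation of two integer points on [0, 10]."""
    model = cp_model.CpModel()
    scaling = 1000  # Precision factor, following the convention used above.

    a = model.new_int_var(0, 10, "a")
    b = model.new_int_var(0, 10, "b")

    # diff = a - b, then diff_sq = diff * diff via add_multiplication_equality.
    diff = model.new_int_var(-10, 10, "diff")
    model.add(diff == a - b)
    diff_sq = model.new_int_var(0, 100, "diff_sq")
    model.add_multiplication_equality(diff_sq, diff, diff)

    # Maximize a scaled lower bound on the squared distance; the true distance
    # is recovered as sqrt(objective / scaling) once the solve finishes.
    scaled_sq = model.new_int_var(0, scaling * 100, "scaled_sq")
    model.add(scaled_sq <= scaling * diff_sq)
    model.maximize(scaled_sq)

    solver = cp_model.CpSolver()
    status = solver.solve(model)
    if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
        print("distance:", (solver.objective_value / scaling) ** 0.5)


max_min_separation_sketch()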
diff --git a/examples/python/steel_mill_slab_sat.py b/examples/python/steel_mill_slab_sat.py
index 6f79d85fbe0..b0c0a101e4d 100644
--- a/examples/python/steel_mill_slab_sat.py
+++ b/examples/python/steel_mill_slab_sat.py
@@ -42,17 +42,17 @@
def build_problem(
problem_id: int,
) -> tuple[int, list[int], int, list[tuple[int, int]]]:
- """Build problem data."""
- if problem_id == 0:
- capacities = [
- # fmt:off
+ """Build problem data."""
+ if problem_id == 0:
+ capacities = [
+ # fmt:off
0, 12, 14, 17, 18, 19, 20, 23, 24, 25, 26, 27, 28, 29, 30, 32, 35, 39, 42, 43, 44,
- # fmt:on
- ]
- num_colors = 88
- num_slabs = 111
- orders = [ # (size, color)
- # fmt:off
+ # fmt:on
+ ]
+ num_colors = 88
+ num_slabs = 111
+ orders = [ # (size, color)
+ # fmt:off
(4, 1), (22, 2), (9, 3), (5, 4), (8, 5), (3, 6), (3, 4), (4, 7),
(7, 4), (7, 8), (3, 6), (2, 6), (2, 4), (8, 9), (5, 10), (7, 11),
(4, 7), (7, 11), (5, 10), (7, 11), (8, 9), (3, 1), (25, 12), (14, 13),
@@ -69,242 +69,244 @@ def build_problem(
(30, 73), (30, 74), (30, 75), (23, 76), (15, 77), (15, 78), (27, 79),
(27, 80), (27, 81), (27, 82), (27, 83), (27, 84), (27, 79), (27, 85),
(27, 86), (10, 87), (3, 88),
- # fmt:on
- ]
- elif problem_id == 1:
- capacities = [0, 17, 44]
- num_colors = 23
- num_slabs = 30
- orders = [ # (size, color)
- # fmt:off
+ # fmt:on
+ ]
+ elif problem_id == 1:
+ capacities = [0, 17, 44]
+ num_colors = 23
+ num_slabs = 30
+ orders = [ # (size, color)
+ # fmt:off
(4, 1), (22, 2), (9, 3), (5, 4), (8, 5), (3, 6), (3, 4), (4, 7), (7, 4),
(7, 8), (3, 6), (2, 6), (2, 4), (8, 9), (5, 10), (7, 11), (4, 7), (7, 11),
(5, 10), (7, 11), (8, 9), (3, 1), (25, 12), (14, 13), (3, 6), (22, 14),
(19, 15), (19, 15), (22, 16), (22, 17), (22, 18), (20, 19), (22, 20),
(5, 21), (4, 22), (10, 23),
- # fmt:on
- ]
- elif problem_id == 2:
- capacities = [0, 17, 44]
- num_colors = 15
- num_slabs = 20
- orders = [ # (size, color)
- # fmt:off
+ # fmt:on
+ ]
+ elif problem_id == 2:
+ capacities = [0, 17, 44]
+ num_colors = 15
+ num_slabs = 20
+ orders = [ # (size, color)
+ # fmt:off
(4, 1), (22, 2), (9, 3), (5, 4), (8, 5), (3, 6), (3, 4), (4, 7), (7, 4),
(7, 8), (3, 6), (2, 6), (2, 4), (8, 9), (5, 10), (7, 11), (4, 7), (7, 11),
(5, 10), (7, 11), (8, 9), (3, 1), (25, 12), (14, 13), (3, 6), (22, 14),
(19, 15), (19, 15),
- # fmt:on
- ]
-
- else: # problem_id == 3, default problem.
- capacities = [0, 17, 44]
- num_colors = 8
- num_slabs = 10
- orders = [ # (size, color)
- (4, 1),
- (22, 2),
- (9, 3),
- (5, 4),
- (8, 5),
- (3, 6),
- (3, 4),
- (4, 7),
- (7, 4),
- (7, 8),
- (3, 6),
- ]
-
- return (num_slabs, capacities, num_colors, orders)
+ # fmt:on
+ ]
+ else: # problem_id == 3, default problem.
+ capacities = [0, 17, 44]
+ num_colors = 8
+ num_slabs = 10
+ orders = [ # (size, color)
+ (4, 1),
+ (22, 2),
+ (9, 3),
+ (5, 4),
+ (8, 5),
+ (3, 6),
+ (3, 4),
+ (4, 7),
+ (7, 4),
+ (7, 8),
+ (3, 6),
+ ]
-class SteelMillSlabSolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
-
- def __init__(self, orders, assign, load, loss) -> None:
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__orders = orders
- self.__assign = assign
- self.__load = load
- self.__loss = loss
- self.__solution_count = 0
- self.__all_orders = range(len(orders))
- self.__all_slabs = range(len(assign[0]))
- self.__start_time = time.time()
-
- def on_solution_callback(self) -> None:
- """Called on each new solution."""
- current_time = time.time()
- objective = sum(self.value(l) for l in self.__loss)
- print(
- f"Solution {self.__solution_count}, time ="
- f" {current_time - self.__start_time} s, objective = {objective}"
- )
- self.__solution_count += 1
- orders_in_slab = [
- [o for o in self.__all_orders if self.value(self.__assign[o][s])]
- for s in self.__all_slabs
- ]
- for s in self.__all_slabs:
- if orders_in_slab[s]:
- line = (
- f" - slab {s}, load = {self.value(self.__load[s])}, loss ="
- f" {self.value(self.__loss[s])}, orders = ["
- )
- for o in orders_in_slab[s]:
- line += f"#{o}(w{self.__orders[o][0]}, c{self.__orders[o][1]})"
- line += "]"
- print(line)
+ return (num_slabs, capacities, num_colors, orders)
-def steel_mill_slab(problem_id: int, break_symmetries: bool) -> None:
- """Solves the Steel Mill Slab Problem."""
- ### Load problem.
- num_slabs, capacities, num_colors, orders = build_problem(problem_id)
-
- num_orders = len(orders)
- num_capacities = len(capacities)
- all_slabs = range(num_slabs)
- all_colors = range(num_colors)
- all_orders = range(len(orders))
+class SteelMillSlabSolutionPrinter(cp_model.CpSolverSolutionCallback):
+ """Print intermediate solutions."""
+
+ def __init__(self, orders, assign, load, loss) -> None:
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__orders = orders
+ self.__assign = assign
+ self.__load = load
+ self.__loss = loss
+ self.__solution_count = 0
+ self.__all_orders = range(len(orders))
+ self.__all_slabs = range(len(assign[0]))
+ self.__start_time = time.time()
+
+ def on_solution_callback(self) -> None:
+ """Called on each new solution."""
+ current_time = time.time()
+ objective = sum(self.value(l) for l in self.__loss)
print(
- f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
- f" {num_capacities - 1} capacities"
+ f"Solution {self.__solution_count}, time ="
+ f" {current_time - self.__start_time} s, objective = {objective}"
)
-
- # Compute auxiliary data.
- widths = [x[0] for x in orders]
- colors = [x[1] for x in orders]
- max_capacity = max(capacities)
- loss_array = [
- min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
+ self.__solution_count += 1
+ orders_in_slab = [
+ [o for o in self.__all_orders if self.value(self.__assign[o][s])]
+ for s in self.__all_slabs
]
- max_loss = max(loss_array)
- orders_per_color = [
- [o for o in all_orders if colors[o] == c + 1] for c in all_colors
- ]
- unique_color_orders = [
- o for o in all_orders if len(orders_per_color[colors[o] - 1]) == 1
- ]
-
- ### Model problem.
-
- # Create the model and the decision variables.
- model = cp_model.CpModel()
- assign = [
- [model.new_bool_var(f"assign_{o}_to_slab_{s}") for s in all_slabs]
- for o in all_orders
- ]
- loads = [model.new_int_var(0, max_capacity, f"load_of_slab_{s}") for s in all_slabs]
- color_is_in_slab = [
- [model.new_bool_var(f"color_{c + 1}_in_slab_{s}") for c in all_colors]
- for s in all_slabs
- ]
-
- # Compute load of all slabs.
- for s in all_slabs:
- model.add(sum(assign[o][s] * widths[o] for o in all_orders) == loads[s])
-
- # Orders are assigned to one slab.
- for o in all_orders:
- model.add_exactly_one(assign[o])
-
- # Redundant constraint (sum of loads == sum of widths).
- model.add(sum(loads) == sum(widths))
-
- # Link present_colors and assign.
- for c in all_colors:
- for s in all_slabs:
- for o in orders_per_color[c]:
- model.add_implication(assign[o][s], color_is_in_slab[s][c])
- model.add_implication(~color_is_in_slab[s][c], ~assign[o][s])
+ for s in self.__all_slabs:
+ if orders_in_slab[s]:
+ line = (
+ f" - slab {s}, load = {self.value(self.__load[s])}, loss ="
+ f" {self.value(self.__loss[s])}, orders = ["
+ )
+ for o in orders_in_slab[s]:
+ line += f"#{o}(w{self.__orders[o][0]}, c{self.__orders[o][1]})"
+ line += "]"
+ print(line)
- # At most two colors per slab.
- for s in all_slabs:
- model.add(sum(color_is_in_slab[s]) <= 2)
- # Project previous constraint on unique_color_orders
+def steel_mill_slab(problem_id: int, break_symmetries: bool) -> None:
+ """Solves the Steel Mill Slab Problem."""
+ ### Load problem.
+ num_slabs, capacities, num_colors, orders = build_problem(problem_id)
+
+ num_orders = len(orders)
+ num_capacities = len(capacities)
+ all_slabs = range(num_slabs)
+ all_colors = range(num_colors)
+ all_orders = range(len(orders))
+ print(
+ f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
+ f" {num_capacities - 1} capacities"
+ )
+
+ # Compute auxiliary data.
+ widths = [x[0] for x in orders]
+ colors = [x[1] for x in orders]
+ max_capacity = max(capacities)
+ loss_array = [
+ min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
+ ]
+ max_loss = max(loss_array)
+ orders_per_color = [
+ [o for o in all_orders if colors[o] == c + 1] for c in all_colors
+ ]
+ unique_color_orders = [
+ o for o in all_orders if len(orders_per_color[colors[o] - 1]) == 1
+ ]
+
+ ### Model problem.
+
+ # Create the model and the decision variables.
+ model = cp_model.CpModel()
+ assign = [
+ [model.new_bool_var(f"assign_{o}_to_slab_{s}") for s in all_slabs]
+ for o in all_orders
+ ]
+ loads = [
+ model.new_int_var(0, max_capacity, f"load_of_slab_{s}") for s in all_slabs
+ ]
+ color_is_in_slab = [
+ [model.new_bool_var(f"color_{c + 1}_in_slab_{s}") for c in all_colors]
+ for s in all_slabs
+ ]
+
+ # Compute load of all slabs.
+ for s in all_slabs:
+ model.add(sum(assign[o][s] * widths[o] for o in all_orders) == loads[s])
+
+ # Orders are assigned to one slab.
+ for o in all_orders:
+ model.add_exactly_one(assign[o])
+
+ # Redundant constraint (sum of loads == sum of widths).
+ model.add(sum(loads) == sum(widths))
+
+ # Link present_colors and assign.
+ for c in all_colors:
for s in all_slabs:
- model.add(sum(assign[o][s] for o in unique_color_orders) <= 2)
-
- # Symmetry breaking.
- for s in range(num_slabs - 1):
- model.add(loads[s] >= loads[s + 1])
-
- # Collect equivalent orders.
- width_to_unique_color_order = {}
- ordered_equivalent_orders = []
- for c in all_colors:
- colored_orders = orders_per_color[c]
- if not colored_orders:
- continue
- if len(colored_orders) == 1:
- o = colored_orders[0]
- w = widths[o]
- if w not in width_to_unique_color_order:
- width_to_unique_color_order[w] = [o]
- else:
- width_to_unique_color_order[w].append(o)
- else:
- local_width_to_order = {}
- for o in colored_orders:
- w = widths[o]
- if w not in local_width_to_order:
- local_width_to_order[w] = []
- local_width_to_order[w].append(o)
- for _, os in local_width_to_order.items():
- if len(os) > 1:
- for p in range(len(os) - 1):
- ordered_equivalent_orders.append((os[p], os[p + 1]))
- for _, os in width_to_unique_color_order.items():
+ for o in orders_per_color[c]:
+ model.add_implication(assign[o][s], color_is_in_slab[s][c])
+ model.add_implication(~color_is_in_slab[s][c], ~assign[o][s])
+
+ # At most two colors per slab.
+ for s in all_slabs:
+ model.add(sum(color_is_in_slab[s]) <= 2)
+
+ # Project previous constraint on unique_color_orders
+ for s in all_slabs:
+ model.add(sum(assign[o][s] for o in unique_color_orders) <= 2)
+
+ # Symmetry breaking.
+ for s in range(num_slabs - 1):
+ model.add(loads[s] >= loads[s + 1])
+
+ # Collect equivalent orders.
+ width_to_unique_color_order = {}
+ ordered_equivalent_orders = []
+ for c in all_colors:
+ colored_orders = orders_per_color[c]
+ if not colored_orders:
+ continue
+ if len(colored_orders) == 1:
+ o = colored_orders[0]
+ w = widths[o]
+ if w not in width_to_unique_color_order:
+ width_to_unique_color_order[w] = [o]
+ else:
+ width_to_unique_color_order[w].append(o)
+ else:
+ local_width_to_order = {}
+ for o in colored_orders:
+ w = widths[o]
+ if w not in local_width_to_order:
+ local_width_to_order[w] = []
+ local_width_to_order[w].append(o)
+ for _, os in local_width_to_order.items():
if len(os) > 1:
- for p in range(len(os) - 1):
- ordered_equivalent_orders.append((os[p], os[p + 1]))
-
- # Create position variables if there are symmetries to be broken.
- if break_symmetries and ordered_equivalent_orders:
- print(
- f" - creating {len(ordered_equivalent_orders)} symmetry breaking"
- " constraints"
+ for p in range(len(os) - 1):
+ ordered_equivalent_orders.append((os[p], os[p + 1]))
+ for _, os in width_to_unique_color_order.items():
+ if len(os) > 1:
+ for p in range(len(os) - 1):
+ ordered_equivalent_orders.append((os[p], os[p + 1]))
+
+ # Create position variables if there are symmetries to be broken.
+ if break_symmetries and ordered_equivalent_orders:
+ print(
+ f" - creating {len(ordered_equivalent_orders)} symmetry breaking"
+ " constraints"
+ )
+ positions = {}
+ for p in ordered_equivalent_orders:
+ if p[0] not in positions:
+ positions[p[0]] = model.new_int_var(
+ 0, num_slabs - 1, f"position_of_slab_{p[0]}"
)
- positions = {}
- for p in ordered_equivalent_orders:
- if p[0] not in positions:
- positions[p[0]] = model.new_int_var(
- 0, num_slabs - 1, f"position_of_slab_{p[0]}"
- )
- model.add_map_domain(positions[p[0]], assign[p[0]])
- if p[1] not in positions:
- positions[p[1]] = model.new_int_var(
- 0, num_slabs - 1, f"position_of_slab_{p[1]}"
- )
- model.add_map_domain(positions[p[1]], assign[p[1]])
- # Finally add the symmetry breaking constraint.
- model.add(positions[p[0]] <= positions[p[1]])
-
- # Objective.
- obj = model.new_int_var(0, num_slabs * max_loss, "obj")
- losses = [model.new_int_var(0, max_loss, f"loss_{s}") for s in all_slabs]
- for s in all_slabs:
- model.add_element(loads[s], loss_array, losses[s])
- model.add(obj == sum(losses))
- model.minimize(obj)
-
- ### Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- objective_printer = cp_model.ObjectiveSolutionPrinter()
- status = solver.solve(model, objective_printer)
-
- ### Output the solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- print(
- f"Loss = {solver.objective_value}, time = {solver.wall_time} s,"
- f" {solver.num_conflicts} conflicts"
+ model.add_map_domain(positions[p[0]], assign[p[0]])
+ if p[1] not in positions:
+ positions[p[1]] = model.new_int_var(
+ 0, num_slabs - 1, f"position_of_slab_{p[1]}"
)
- else:
- print("No solution")
+ model.add_map_domain(positions[p[1]], assign[p[1]])
+ # Finally add the symmetry breaking constraint.
+ model.add(positions[p[0]] <= positions[p[1]])
+
+ # Objective.
+ obj = model.new_int_var(0, num_slabs * max_loss, "obj")
+ losses = [model.new_int_var(0, max_loss, f"loss_{s}") for s in all_slabs]
+ for s in all_slabs:
+ model.add_element(loads[s], loss_array, losses[s])
+ model.add(obj == sum(losses))
+ model.minimize(obj)
+
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ objective_printer = cp_model.ObjectiveSolutionPrinter()
+ status = solver.solve(model, objective_printer)
+
+ ### Output the solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ print(
+ f"Loss = {solver.objective_value}, time = {solver.wall_time} s,"
+ f" {solver.num_conflicts} conflicts"
+ )
+ else:
+ print("No solution")
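The symmetry-breaking block above channels each order's row of assignment Booleans into one integer position variable with add_map_domain, so equivalent orders can simply be ordered by position. Here is a small self-contained sketch of that channeling, with two interchangeable items and three bins; the data is invented for illustration.

from ortools.sat.python import cp_model


def map_domain_symmetry_sketch() -> None:
    """Orders two interchangeable items by the bin they land in."""
    model = cp_model.CpModel()
    num_bins = 3

    # One row of Booleans per item: item i is in bin b iff in_bin[i][b].
    in_bin = [
        [model.new_bool_var(f"item_{i}_in_bin_{b}") for b in range(num_bins)]
        for i in range(2)
    ]
    for row in in_bin:
        model.add_exactly_one(row)

    # position[i] == b  <=>  in_bin[i][b] is true (channeling via add_map_domain).
    position = [
        model.new_int_var(0, num_bins - 1, f"position_{i}") for i in range(2)
    ]
    for i in range(2):
        model.add_map_domain(position[i], in_bin[i])

    # Symmetry breaking: item 0 never lands in a later bin than item 1.
    model.add(position[0] <= position[1])

    solver = cp_model.CpSolver()
    solver.solve(model)


map_domain_symmetry_sketch()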
def collect_valid_slabs_dp(
@@ -313,264 +315,269 @@ def collect_valid_slabs_dp(
widths: list[int],
loss_array: list[int],
) -> list[list[int]]:
- """Collect valid columns (assign, loss) for one slab."""
- start_time = time.time()
-
- max_capacity = max(capacities)
-
- valid_assignment = collections.namedtuple("valid_assignment", "orders load colors")
- all_valid_assignments = [valid_assignment(orders=[], load=0, colors=[])]
-
- for order_id, new_color in enumerate(colors):
- new_width = widths[order_id]
- new_assignments = []
- for assignment in all_valid_assignments:
- if assignment.load + new_width > max_capacity:
- continue
- new_colors = list(assignment.colors)
- if new_color not in new_colors:
- new_colors.append(new_color)
- if len(new_colors) > 2:
- continue
- new_assignment = valid_assignment(
- orders=assignment.orders + [order_id],
- load=assignment.load + new_width,
- colors=new_colors,
- )
- new_assignments.append(new_assignment)
- all_valid_assignments.extend(new_assignments)
+ """Collect valid columns (assign, loss) for one slab."""
+ start_time = time.time()
- print(
- f"{len(all_valid_assignments)} assignments created in"
- f" {time.time() - start_time:2f} s"
- )
- tuples = []
- for assignment in all_valid_assignments:
- solution = [0] * len(colors)
- for i in assignment.orders:
- solution[i] = 1
- solution.append(loss_array[assignment.load])
- solution.append(assignment.load)
- tuples.append(solution)
-
- return tuples
-
-
-def steel_mill_slab_with_valid_slabs(problem_id: int, break_symmetries: bool) -> None:
- """Solves the Steel Mill Slab Problem."""
- ### Load problem.
- (num_slabs, capacities, num_colors, orders) = build_problem(problem_id)
-
- num_orders = len(orders)
- num_capacities = len(capacities)
- all_slabs = range(num_slabs)
- all_colors = range(num_colors)
- all_orders = range(len(orders))
- print(
- f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
- f" {num_capacities - 1} capacities"
- )
-
- # Compute auxiliary data.
- widths = [x[0] for x in orders]
- colors = [x[1] for x in orders]
- max_capacity = max(capacities)
- loss_array = [
- min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
- ]
- max_loss = max(loss_array)
+ max_capacity = max(capacities)
- ### Model problem.
-
- # Create the model and the decision variables.
- model = cp_model.CpModel()
- assign = [
- [model.new_bool_var(r"assign_{o}_to_slab_{s}") for s in all_slabs]
- for o in all_orders
- ]
- loads = [model.new_int_var(0, max_capacity, f"load_{s}") for s in all_slabs]
- losses = [model.new_int_var(0, max_loss, f"loss_{s}") for s in all_slabs]
+ valid_assignment = collections.namedtuple(
+ "valid_assignment", "orders load colors"
+ )
+ all_valid_assignments = [valid_assignment(orders=[], load=0, colors=[])]
- unsorted_valid_slabs = collect_valid_slabs_dp(
- capacities, colors, widths, loss_array
+ for order_id, new_color in enumerate(colors):
+ new_width = widths[order_id]
+ new_assignments = []
+ for assignment in all_valid_assignments:
+ if assignment.load + new_width > max_capacity:
+ continue
+ new_colors = list(assignment.colors)
+ if new_color not in new_colors:
+ new_colors.append(new_color)
+ if len(new_colors) > 2:
+ continue
+ new_assignment = valid_assignment(
+ orders=assignment.orders + [order_id],
+ load=assignment.load + new_width,
+ colors=new_colors,
+ )
+ new_assignments.append(new_assignment)
+ all_valid_assignments.extend(new_assignments)
+
+ print(
+ f"{len(all_valid_assignments)} assignments created in"
+        f" {time.time() - start_time:.2f} s"
+ )
+ tuples = []
+ for assignment in all_valid_assignments:
+ solution = [0] * len(colors)
+ for i in assignment.orders:
+ solution[i] = 1
+ solution.append(loss_array[assignment.load])
+ solution.append(assignment.load)
+ tuples.append(solution)
+
+ return tuples
+
+
+def steel_mill_slab_with_valid_slabs(
+ problem_id: int, break_symmetries: bool
+) -> None:
+    """Solves the Steel Mill Slab Problem using a table of valid slabs."""
+ ### Load problem.
+ (num_slabs, capacities, num_colors, orders) = build_problem(problem_id)
+
+ num_orders = len(orders)
+ num_capacities = len(capacities)
+ all_slabs = range(num_slabs)
+ all_colors = range(num_colors)
+ all_orders = range(len(orders))
+ print(
+ f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
+ f" {num_capacities - 1} capacities"
+ )
+
+ # Compute auxiliary data.
+ widths = [x[0] for x in orders]
+ colors = [x[1] for x in orders]
+ max_capacity = max(capacities)
+ loss_array = [
+ min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
+ ]
+ max_loss = max(loss_array)
+
+ ### Model problem.
+
+ # Create the model and the decision variables.
+ model = cp_model.CpModel()
+ assign = [
+        [model.new_bool_var(f"assign_{o}_to_slab_{s}") for s in all_slabs]
+ for o in all_orders
+ ]
+ loads = [model.new_int_var(0, max_capacity, f"load_{s}") for s in all_slabs]
+ losses = [model.new_int_var(0, max_loss, f"loss_{s}") for s in all_slabs]
+
+ unsorted_valid_slabs = collect_valid_slabs_dp(
+ capacities, colors, widths, loss_array
+ )
+    # Sort slabs by load, then by loss.
+ valid_slabs = sorted(unsorted_valid_slabs, key=lambda c: 1000 * c[-1] + c[-2])
+
+ for s in all_slabs:
+ model.add_allowed_assignments(
+ [assign[o][s] for o in all_orders] + [losses[s], loads[s]], valid_slabs
)
- # Sort slab by descending load/loss. Remove duplicates.
- valid_slabs = sorted(unsorted_valid_slabs, key=lambda c: 1000 * c[-1] + c[-2])
-
- for s in all_slabs:
- model.add_allowed_assignments(
- [assign[o][s] for o in all_orders] + [losses[s], loads[s]], valid_slabs
- )
-
- # Orders are assigned to one slab.
- for o in all_orders:
- model.add_exactly_one(assign[o])
-
- # Redundant constraint (sum of loads == sum of widths).
- model.add(sum(loads) == sum(widths))
-
- # Symmetry breaking.
- for s in range(num_slabs - 1):
- model.add(loads[s] >= loads[s + 1])
-
- # Collect equivalent orders.
- if break_symmetries:
- print("Breaking symmetries")
- width_to_unique_color_order = {}
- ordered_equivalent_orders = []
- orders_per_color = [
- [o for o in all_orders if colors[o] == c + 1] for c in all_colors
- ]
- for c in all_colors:
- colored_orders = orders_per_color[c]
- if not colored_orders:
- continue
- if len(colored_orders) == 1:
- o = colored_orders[0]
- w = widths[o]
- if w not in width_to_unique_color_order:
- width_to_unique_color_order[w] = [o]
- else:
- width_to_unique_color_order[w].append(o)
- else:
- local_width_to_order = {}
- for o in colored_orders:
- w = widths[o]
- if w not in local_width_to_order:
- local_width_to_order[w] = []
- local_width_to_order[w].append(o)
- for _, os in local_width_to_order.items():
- if len(os) > 1:
- for p in range(len(os) - 1):
- ordered_equivalent_orders.append((os[p], os[p + 1]))
- for _, os in width_to_unique_color_order.items():
- if len(os) > 1:
- for p in range(len(os) - 1):
- ordered_equivalent_orders.append((os[p], os[p + 1]))
-
- # Create position variables if there are symmetries to be broken.
- if ordered_equivalent_orders:
- print(
- f" - creating {len(ordered_equivalent_orders)} symmetry breaking"
- " constraints"
- )
- positions = {}
- for p in ordered_equivalent_orders:
- if p[0] not in positions:
- positions[p[0]] = model.new_int_var(
- 0, num_slabs - 1, f"position_of_slab_{p[0]}"
- )
- model.add_map_domain(positions[p[0]], assign[p[0]])
- if p[1] not in positions:
- positions[p[1]] = model.new_int_var(
- 0, num_slabs - 1, f"position_of_slab_{p[1]}"
- )
- model.add_map_domain(positions[p[1]], assign[p[1]])
- # Finally add the symmetry breaking constraint.
- model.add(positions[p[0]] <= positions[p[1]])
-
- # Objective.
- model.minimize(sum(losses))
-
- print("Model created")
-
- ### Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
-
- solution_printer = SteelMillSlabSolutionPrinter(orders, assign, loads, losses)
- status = solver.solve(model, solution_printer)
-
- ### Output the solution.
- if status == cp_model.OPTIMAL:
- print(
- f"Loss = {solver.objective_value}, time = {solver.wall_time:2f} s,"
- f" {solver.num_conflicts} conflicts"
- )
- else:
- print("No solution")
+ # Orders are assigned to one slab.
+ for o in all_orders:
+ model.add_exactly_one(assign[o])
-def steel_mill_slab_with_column_generation(problem_id: int) -> None:
- """Solves the Steel Mill Slab Problem."""
- ### Load problem.
- (num_slabs, capacities, _, orders) = build_problem(problem_id)
+ # Redundant constraint (sum of loads == sum of widths).
+ model.add(sum(loads) == sum(widths))
- num_orders = len(orders)
- num_capacities = len(capacities)
- all_orders = range(len(orders))
- print(
- f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
- f" {num_capacities - 1} capacities"
- )
+ # Symmetry breaking.
+ for s in range(num_slabs - 1):
+ model.add(loads[s] >= loads[s + 1])
- # Compute auxiliary data.
- widths = [x[0] for x in orders]
- colors = [x[1] for x in orders]
- max_capacity = max(capacities)
- loss_array = [
- min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
+ # Collect equivalent orders.
+ if break_symmetries:
+ print("Breaking symmetries")
+ width_to_unique_color_order = {}
+ ordered_equivalent_orders = []
+ orders_per_color = [
+ [o for o in all_orders if colors[o] == c + 1] for c in all_colors
]
+ for c in all_colors:
+ colored_orders = orders_per_color[c]
+ if not colored_orders:
+ continue
+ if len(colored_orders) == 1:
+ o = colored_orders[0]
+ w = widths[o]
+ if w not in width_to_unique_color_order:
+ width_to_unique_color_order[w] = [o]
+ else:
+ width_to_unique_color_order[w].append(o)
+ else:
+ local_width_to_order = {}
+ for o in colored_orders:
+ w = widths[o]
+ if w not in local_width_to_order:
+ local_width_to_order[w] = []
+ local_width_to_order[w].append(o)
+ for _, os in local_width_to_order.items():
+ if len(os) > 1:
+ for p in range(len(os) - 1):
+ ordered_equivalent_orders.append((os[p], os[p + 1]))
+ for _, os in width_to_unique_color_order.items():
+ if len(os) > 1:
+ for p in range(len(os) - 1):
+ ordered_equivalent_orders.append((os[p], os[p + 1]))
- ### Model problem.
-
- # Generate all valid slabs (columns)
- unsorted_valid_slabs = collect_valid_slabs_dp(
- capacities, colors, widths, loss_array
+ # Create position variables if there are symmetries to be broken.
+ if ordered_equivalent_orders:
+ print(
+ f" - creating {len(ordered_equivalent_orders)} symmetry breaking"
+ " constraints"
+ )
+ positions = {}
+ for p in ordered_equivalent_orders:
+ if p[0] not in positions:
+ positions[p[0]] = model.new_int_var(
+ 0, num_slabs - 1, f"position_of_slab_{p[0]}"
+ )
+ model.add_map_domain(positions[p[0]], assign[p[0]])
+ if p[1] not in positions:
+ positions[p[1]] = model.new_int_var(
+ 0, num_slabs - 1, f"position_of_slab_{p[1]}"
+ )
+ model.add_map_domain(positions[p[1]], assign[p[1]])
+ # Finally add the symmetry breaking constraint.
+ model.add(positions[p[0]] <= positions[p[1]])
+
+ # Objective.
+ model.minimize(sum(losses))
+
+ print("Model created")
+
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+
+ solution_printer = SteelMillSlabSolutionPrinter(orders, assign, loads, losses)
+ status = solver.solve(model, solution_printer)
+
+ ### Output the solution.
+ if status == cp_model.OPTIMAL:
+ print(
+            f"Loss = {solver.objective_value}, time = {solver.wall_time:.2f} s,"
+ f" {solver.num_conflicts} conflicts"
)
+ else:
+ print("No solution")
- # Sort slab by descending load/loss. Remove duplicates.
- valid_slabs = sorted(unsorted_valid_slabs, key=lambda c: 1000 * c[-1] + c[-2])
- all_valid_slabs = range(len(valid_slabs))
- # create model and decision variables.
- model = cp_model.CpModel()
- selected = [model.new_bool_var(f"selected_{i}") for i in all_valid_slabs]
-
- for order_id in all_orders:
- model.add(
- sum(selected[i] for i, slab in enumerate(valid_slabs) if slab[order_id])
- == 1
- )
-
- # Redundant constraint (sum of loads == sum of widths).
+def steel_mill_slab_with_column_generation(problem_id: int) -> None:
+    """Solves the Steel Mill Slab Problem via column generation."""
+ ### Load problem.
+ (num_slabs, capacities, _, orders) = build_problem(problem_id)
+
+ num_orders = len(orders)
+ num_capacities = len(capacities)
+ all_orders = range(len(orders))
+ print(
+ f"Solving steel mill with {num_orders} orders, {num_slabs} slabs, and"
+ f" {num_capacities - 1} capacities"
+ )
+
+ # Compute auxiliary data.
+ widths = [x[0] for x in orders]
+ colors = [x[1] for x in orders]
+ max_capacity = max(capacities)
+ loss_array = [
+ min(x for x in capacities if x >= c) - c for c in range(max_capacity + 1)
+ ]
+
+ ### Model problem.
+
+ # Generate all valid slabs (columns)
+ unsorted_valid_slabs = collect_valid_slabs_dp(
+ capacities, colors, widths, loss_array
+ )
+
+    # Sort slabs by load, then by loss.
+ valid_slabs = sorted(unsorted_valid_slabs, key=lambda c: 1000 * c[-1] + c[-2])
+ all_valid_slabs = range(len(valid_slabs))
+
+    # Create the model and decision variables.
+ model = cp_model.CpModel()
+ selected = [model.new_bool_var(f"selected_{i}") for i in all_valid_slabs]
+
+ for order_id in all_orders:
model.add(
- sum(selected[i] * valid_slabs[i][-1] for i in all_valid_slabs) == sum(widths)
+ sum(selected[i] for i, slab in enumerate(valid_slabs) if slab[order_id])
+ == 1
)
- # Objective.
- model.minimize(sum(selected[i] * valid_slabs[i][-2] for i in all_valid_slabs))
+ # Redundant constraint (sum of loads == sum of widths).
+ model.add(
+ sum(selected[i] * valid_slabs[i][-1] for i in all_valid_slabs)
+ == sum(widths)
+ )
- print("Model created")
+ # Objective.
+ model.minimize(sum(selected[i] * valid_slabs[i][-2] for i in all_valid_slabs))
- ### Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- solution_printer = cp_model.ObjectiveSolutionPrinter()
- status = solver.solve(model, solution_printer)
+ print("Model created")
- ### Output the solution.
- if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
- print(
- f"Loss = {solver.objective_value}, time = {solver.wall_time:2f} s,"
- f" {solver.num_conflicts} conflicts"
- )
- else:
- print("No solution")
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ solution_printer = cp_model.ObjectiveSolutionPrinter()
+ status = solver.solve(model, solution_printer)
+
+ ### Output the solution.
+ if status in (cp_model.OPTIMAL, cp_model.FEASIBLE):
+ print(
+            f"Loss = {solver.objective_value}, time = {solver.wall_time:.2f} s,"
+ f" {solver.num_conflicts} conflicts"
+ )
+ else:
+ print("No solution")
def main(_):
- if _SOLVER.value == "sat":
- steel_mill_slab(_PROBLEM.value, _BREAK_SYMMETRIES.value)
- elif _SOLVER.value == "sat_table":
- steel_mill_slab_with_valid_slabs(_PROBLEM.value, _BREAK_SYMMETRIES.value)
- elif _SOLVER.value == "sat_column":
- steel_mill_slab_with_column_generation(_PROBLEM.value)
- else:
- print(f"Unknown model {_SOLVER.value}")
+ if _SOLVER.value == "sat":
+ steel_mill_slab(_PROBLEM.value, _BREAK_SYMMETRIES.value)
+ elif _SOLVER.value == "sat_table":
+ steel_mill_slab_with_valid_slabs(_PROBLEM.value, _BREAK_SYMMETRIES.value)
+ elif _SOLVER.value == "sat_column":
+ steel_mill_slab_with_column_generation(_PROBLEM.value)
+ else:
+ print(f"Unknown model {_SOLVER.value}")
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
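steel_mill_slab_with_valid_slabs and the column-generation variant above both lean on precomputed columns; the table version injects them with add_allowed_assignments, which restricts a whole vector of variables to an explicit list of tuples. Below is a toy sketch of that constraint on made-up data: two order literals and a load variable for a single slab, with purely illustrative tuples.

from ortools.sat.python import cp_model


def allowed_assignments_sketch() -> None:
    """Restricts (a, b, load) to a hand-written table of feasible columns."""
    model = cp_model.CpModel()

    a = model.new_bool_var("order_a_on_slab")
    b = model.new_bool_var("order_b_on_slab")
    load = model.new_int_var(0, 10, "slab_load")

    # Each tuple is one feasible column: (a, b, resulting load).
    feasible_columns = [
        (0, 0, 0),
        (1, 0, 4),
        (0, 1, 7),
        (1, 1, 10),  # Hypothetical: both orders fit exactly at capacity.
    ]
    model.add_allowed_assignments([a, b, load], feasible_columns)

    # Ask for the fullest slab, just to exercise the table.
    model.maximize(load)

    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print(solver.value(a), solver.value(b), solver.value(load))


allowed_assignments_sketch()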
diff --git a/examples/python/sudoku_sat.py b/examples/python/sudoku_sat.py
index 069322242cd..ebda54126be 100755
--- a/examples/python/sudoku_sat.py
+++ b/examples/python/sudoku_sat.py
@@ -18,62 +18,62 @@
def solve_sudoku() -> None:
- """Solves the sudoku problem with the CP-SAT solver."""
- # Create the model.
- model = cp_model.CpModel()
+ """Solves the sudoku problem with the CP-SAT solver."""
+ # Create the model.
+ model = cp_model.CpModel()
- cell_size = 3
- line_size = cell_size**2
- line = list(range(0, line_size))
- cell = list(range(0, cell_size))
+ cell_size = 3
+ line_size = cell_size**2
+ line = list(range(0, line_size))
+ cell = list(range(0, cell_size))
- initial_grid = [
- [0, 6, 0, 0, 5, 0, 0, 2, 0],
- [0, 0, 0, 3, 0, 0, 0, 9, 0],
- [7, 0, 0, 6, 0, 0, 0, 1, 0],
- [0, 0, 6, 0, 3, 0, 4, 0, 0],
- [0, 0, 4, 0, 7, 0, 1, 0, 0],
- [0, 0, 5, 0, 9, 0, 8, 0, 0],
- [0, 4, 0, 0, 0, 1, 0, 0, 6],
- [0, 3, 0, 0, 0, 8, 0, 0, 0],
- [0, 2, 0, 0, 4, 0, 0, 5, 0],
- ]
+ initial_grid = [
+ [0, 6, 0, 0, 5, 0, 0, 2, 0],
+ [0, 0, 0, 3, 0, 0, 0, 9, 0],
+ [7, 0, 0, 6, 0, 0, 0, 1, 0],
+ [0, 0, 6, 0, 3, 0, 4, 0, 0],
+ [0, 0, 4, 0, 7, 0, 1, 0, 0],
+ [0, 0, 5, 0, 9, 0, 8, 0, 0],
+ [0, 4, 0, 0, 0, 1, 0, 0, 6],
+ [0, 3, 0, 0, 0, 8, 0, 0, 0],
+ [0, 2, 0, 0, 4, 0, 0, 5, 0],
+ ]
- grid = {}
- for i in line:
- for j in line:
- grid[(i, j)] = model.new_int_var(1, line_size, "grid %i %i" % (i, j))
+ grid = {}
+ for i in line:
+ for j in line:
+ grid[(i, j)] = model.new_int_var(1, line_size, "grid %i %i" % (i, j))
- # AllDifferent on rows.
- for i in line:
- model.add_all_different(grid[(i, j)] for j in line)
+ # AllDifferent on rows.
+ for i in line:
+ model.add_all_different(grid[(i, j)] for j in line)
- # AllDifferent on columns.
- for j in line:
- model.add_all_different(grid[(i, j)] for i in line)
+ # AllDifferent on columns.
+ for j in line:
+ model.add_all_different(grid[(i, j)] for i in line)
- # AllDifferent on cells.
- for i in cell:
- for j in cell:
- one_cell = []
- for di in cell:
- for dj in cell:
- one_cell.append(grid[(i * cell_size + di, j * cell_size + dj)])
+ # AllDifferent on cells.
+ for i in cell:
+ for j in cell:
+ one_cell = []
+ for di in cell:
+ for dj in cell:
+ one_cell.append(grid[(i * cell_size + di, j * cell_size + dj)])
- model.add_all_different(one_cell)
+ model.add_all_different(one_cell)
- # Initial values.
- for i in line:
- for j in line:
- if initial_grid[i][j]:
- model.add(grid[(i, j)] == initial_grid[i][j])
+ # Initial values.
+ for i in line:
+ for j in line:
+ if initial_grid[i][j]:
+ model.add(grid[(i, j)] == initial_grid[i][j])
- # Solves and prints out the solution.
- solver = cp_model.CpSolver()
- status = solver.solve(model)
- if status == cp_model.OPTIMAL:
- for i in line:
- print([int(solver.value(grid[(i, j)])) for j in line])
+ # Solves and prints out the solution.
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+ if status == cp_model.OPTIMAL:
+ for i in line:
+ print([int(solver.value(grid[(i, j)])) for j in line])
solve_sudoku()
diff --git a/examples/python/task_allocation_sat.py b/examples/python/task_allocation_sat.py
index c35b9c6e20e..ab7448ca5cf 100644
--- a/examples/python/task_allocation_sat.py
+++ b/examples/python/task_allocation_sat.py
@@ -24,10 +24,10 @@
def task_allocation_sat() -> None:
- """Solves the task allocation problem."""
- # Availability matrix.
- available = [
- # fmt:off
+ """Solves the task allocation problem."""
+ # Availability matrix.
+ available = [
+ # fmt:off
[
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
@@ -228,71 +228,75 @@ def task_allocation_sat() -> None:
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
],
- # fmt:on
- ]
+ # fmt:on
+ ]
- ntasks = len(available)
- nslots = len(available[0])
+ ntasks = len(available)
+ nslots = len(available[0])
- # sets
- all_tasks = range(ntasks)
- all_slots = range(nslots)
+ # sets
+ all_tasks = range(ntasks)
+ all_slots = range(nslots)
- # max tasks per time slot
- capacity = 3
+ # max tasks per time slot
+ capacity = 3
- # Model
- model = cp_model.CpModel()
- assign = {}
- for task in all_tasks:
- for slot in all_slots:
- assign[(task, slot)] = model.new_bool_var(f"x[{task}][{slot}]")
- count = model.new_int_var(0, nslots, "count")
- slot_used = [model.new_bool_var(f"slot_used[{s}]") for s in all_slots]
+ # Model
+ model = cp_model.CpModel()
+ assign = {}
+ for task in all_tasks:
+ for slot in all_slots:
+ assign[(task, slot)] = model.new_bool_var(f"x[{task}][{slot}]")
+ count = model.new_int_var(0, nslots, "count")
+ slot_used = [model.new_bool_var(f"slot_used[{s}]") for s in all_slots]
- for task in all_tasks:
- model.add(
- sum(
- assign[(task, slot)] for slot in all_slots if available[task][slot] == 1
- )
- == 1
+ for task in all_tasks:
+ model.add(
+ sum(
+ assign[(task, slot)]
+ for slot in all_slots
+ if available[task][slot] == 1
)
+ == 1
+ )
- for slot in all_slots:
- model.add(
- sum(
- assign[(task, slot)] for task in all_tasks if available[task][slot] == 1
- )
- <= capacity
+ for slot in all_slots:
+ model.add(
+ sum(
+ assign[(task, slot)]
+ for task in all_tasks
+ if available[task][slot] == 1
)
- model.add_bool_or(
- [assign[(task, slot)] for task in all_tasks if available[task][slot] == 1]
- ).only_enforce_if(slot_used[slot])
- for task in all_tasks:
- if available[task][slot] == 1:
- model.add_implication(~slot_used[slot], ~assign[(task, slot)])
- else:
- model.add(assign[(task, slot)] == 0)
+ <= capacity
+ )
+ model.add_bool_or([
+ assign[(task, slot)] for task in all_tasks if available[task][slot] == 1
+ ]).only_enforce_if(slot_used[slot])
+ for task in all_tasks:
+ if available[task][slot] == 1:
+ model.add_implication(~slot_used[slot], ~assign[(task, slot)])
+ else:
+ model.add(assign[(task, slot)] == 0)
- model.add(count == sum(slot_used))
- # Redundant constraint. This instance is easier if we add this constraint.
- # model.add(count >= (nslots + capacity - 1) // capacity)
+ model.add(count == sum(slot_used))
+ # Redundant constraint. This instance is easier if we add this constraint.
+ # model.add(count >= (nslots + capacity - 1) // capacity)
- model.minimize(count)
+ model.minimize(count)
- # Create a solver and solve the problem.
- solver = cp_model.CpSolver()
- # Uses the portfolion of heuristics.
- solver.parameters.log_search_progress = True
- solver.parameters.num_search_workers = 16
- solver.solve(model)
+ # Create a solver and solve the problem.
+ solver = cp_model.CpSolver()
+    # Uses the portfolio of heuristics.
+ solver.parameters.log_search_progress = True
+ solver.parameters.num_search_workers = 16
+ solver.solve(model)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- task_allocation_sat()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ task_allocation_sat()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
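The slot_used bookkeeping in task_allocation_sat above is a standard reification pattern: add_bool_or(...).only_enforce_if(slot_used[slot]) forces at least one assignment when the slot is counted as used, and the reverse implications force all assignments to zero when it is not. A minimal sketch of that two-way link for a single slot follows; the three task literals are placeholders.

from ortools.sat.python import cp_model


def slot_used_channeling_sketch() -> None:
    """Links a slot_used literal to the tasks assigned to that slot."""
    model = cp_model.CpModel()

    tasks_in_slot = [model.new_bool_var(f"task_{t}_in_slot") for t in range(3)]
    slot_used = model.new_bool_var("slot_used")

    # If the slot is used, at least one task must be assigned to it.
    model.add_bool_or(tasks_in_slot).only_enforce_if(slot_used)
    # If the slot is not used, no task may be assigned to it.
    for literal in tasks_in_slot:
        model.add_implication(~slot_used, ~literal)

    # Minimizing slot_used now counts the slot only when it really hosts a task.
    model.minimize(slot_used)

    solver = cp_model.CpSolver()
    solver.solve(model)


slot_used_channeling_sketch()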
diff --git a/examples/python/tasks_and_workers_assignment_sat.py b/examples/python/tasks_and_workers_assignment_sat.py
index bdb8983087a..1fe7abfa04d 100644
--- a/examples/python/tasks_and_workers_assignment_sat.py
+++ b/examples/python/tasks_and_workers_assignment_sat.py
@@ -20,114 +20,120 @@
class ObjectivePrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
+ """Print intermediate solutions."""
- def __init__(self):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
+ def __init__(self):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
- def on_solution_callback(self):
- print(
- "Solution %i, time = %f s, objective = %i"
- % (self.__solution_count, self.wall_time, self.objective_value)
- )
- self.__solution_count += 1
+ def on_solution_callback(self):
+ print(
+ "Solution %i, time = %f s, objective = %i"
+ % (self.__solution_count, self.wall_time, self.objective_value)
+ )
+ self.__solution_count += 1
def tasks_and_workers_assignment_sat() -> None:
- """solve the assignment problem."""
- model = cp_model.CpModel()
-
- # CP-SAT solver is integer only.
- task_cost = [24, 10, 7, 2, 11, 16, 1, 13, 9, 27]
- num_tasks = len(task_cost)
- num_workers = 3
- num_groups = 2
- all_workers = range(num_workers)
- all_groups = range(num_groups)
- all_tasks = range(num_tasks)
-
- # Variables
-
- ## x_ij = 1 if worker i is assigned to group j
- x = {}
- for i in all_workers:
- for j in all_groups:
- x[i, j] = model.new_bool_var("x[%i,%i]" % (i, j))
-
- ## y_kj is 1 if task k is assigned to group j
- y = {}
- for k in all_tasks:
- for j in all_groups:
- y[k, j] = model.new_bool_var("x[%i,%i]" % (k, j))
-
- # Constraints
-
- # Each task k is assigned to a group and only one.
- for k in all_tasks:
- model.add(sum(y[k, j] for j in all_groups) == 1)
-
- # Each worker i is assigned to a group and only one.
- for i in all_workers:
- model.add(sum(x[i, j] for j in all_groups) == 1)
-
- # Cost per group
- sum_of_costs = sum(task_cost)
- averages = []
- num_workers_in_group = []
- scaled_sum_of_costs_in_group = []
- scaling = 1000 # We introduce scaling to deal with floating point average.
+    """Solves the assignment problem."""
+ model = cp_model.CpModel()
+
+ # CP-SAT solver is integer only.
+ task_cost = [24, 10, 7, 2, 11, 16, 1, 13, 9, 27]
+ num_tasks = len(task_cost)
+ num_workers = 3
+ num_groups = 2
+ all_workers = range(num_workers)
+ all_groups = range(num_groups)
+ all_tasks = range(num_tasks)
+
+ # Variables
+
+ ## x_ij = 1 if worker i is assigned to group j
+ x = {}
+ for i in all_workers:
for j in all_groups:
- n = model.new_int_var(1, num_workers, "num_workers_in_group_%i" % j)
- model.add(n == sum(x[i, j] for i in all_workers))
- c = model.new_int_var(0, sum_of_costs * scaling, "sum_of_costs_of_group_%i" % j)
- model.add(c == sum(y[k, j] * task_cost[k] * scaling for k in all_tasks))
- a = model.new_int_var(0, sum_of_costs * scaling, "average_cost_of_group_%i" % j)
- model.add_division_equality(a, c, n)
-
- averages.append(a)
- num_workers_in_group.append(n)
- scaled_sum_of_costs_in_group.append(c)
-
- # All workers are assigned.
- model.add(sum(num_workers_in_group) == num_workers)
-
- # Objective.
- obj = model.new_int_var(0, sum_of_costs * scaling, "obj")
- model.add_max_equality(obj, averages)
- model.minimize(obj)
-
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- solver.parameters.max_time_in_seconds = 60 * 60 * 2
- objective_printer = ObjectivePrinter()
- status = solver.solve(model, objective_printer)
- print(solver.response_stats())
-
- if status == cp_model.OPTIMAL:
- for j in all_groups:
- print("Group %i" % j)
- for i in all_workers:
- if solver.boolean_value(x[i, j]):
- print(" - worker %i" % i)
- for k in all_tasks:
- if solver.boolean_value(y[k, j]):
- print(" - task %i with cost %i" % (k, task_cost[k]))
- print(
- " - sum_of_costs = %i"
- % (solver.value(scaled_sum_of_costs_in_group[j]) // scaling)
- )
- print(" - average cost = %f" % (solver.value(averages[j]) * 1.0 / scaling))
+ x[i, j] = model.new_bool_var("x[%i,%i]" % (i, j))
+
+ ## y_kj is 1 if task k is assigned to group j
+ y = {}
+ for k in all_tasks:
+ for j in all_groups:
+            y[k, j] = model.new_bool_var("y[%i,%i]" % (k, j))
+
+ # Constraints
+
+ # Each task k is assigned to a group and only one.
+ for k in all_tasks:
+ model.add(sum(y[k, j] for j in all_groups) == 1)
+
+ # Each worker i is assigned to a group and only one.
+ for i in all_workers:
+ model.add(sum(x[i, j] for j in all_groups) == 1)
+
+ # Cost per group
+ sum_of_costs = sum(task_cost)
+ averages = []
+ num_workers_in_group = []
+ scaled_sum_of_costs_in_group = []
+ scaling = 1000 # We introduce scaling to deal with floating point average.
+ for j in all_groups:
+ n = model.new_int_var(1, num_workers, "num_workers_in_group_%i" % j)
+ model.add(n == sum(x[i, j] for i in all_workers))
+ c = model.new_int_var(
+ 0, sum_of_costs * scaling, "sum_of_costs_of_group_%i" % j
+ )
+ model.add(c == sum(y[k, j] * task_cost[k] * scaling for k in all_tasks))
+ a = model.new_int_var(
+ 0, sum_of_costs * scaling, "average_cost_of_group_%i" % j
+ )
+ model.add_division_equality(a, c, n)
+
+ averages.append(a)
+ num_workers_in_group.append(n)
+ scaled_sum_of_costs_in_group.append(c)
+
+ # All workers are assigned.
+ model.add(sum(num_workers_in_group) == num_workers)
+
+ # Objective.
+ obj = model.new_int_var(0, sum_of_costs * scaling, "obj")
+ model.add_max_equality(obj, averages)
+ model.minimize(obj)
+
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ solver.parameters.max_time_in_seconds = 60 * 60 * 2
+ objective_printer = ObjectivePrinter()
+ status = solver.solve(model, objective_printer)
+ print(solver.response_stats())
+
+ if status == cp_model.OPTIMAL:
+ for j in all_groups:
+ print("Group %i" % j)
+ for i in all_workers:
+ if solver.boolean_value(x[i, j]):
+ print(" - worker %i" % i)
+ for k in all_tasks:
+ if solver.boolean_value(y[k, j]):
+ print(" - task %i with cost %i" % (k, task_cost[k]))
+ print(
+ " - sum_of_costs = %i"
+ % (solver.value(scaled_sum_of_costs_in_group[j]) // scaling)
+ )
+ print(
+ " - average cost = %f" % (solver.value(averages[j]) * 1.0 / scaling)
+ )
tasks_and_workers_assignment_sat()
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- tasks_and_workers_assignment_sat()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ tasks_and_workers_assignment_sat()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
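The grouping example above works around CP-SAT being integer-only by scaling costs before dividing: the per-group average is total // n after costs are multiplied by 1000, and the result is only divided back when printing. A compact sketch of that scaled-average idiom on invented numbers:

from ortools.sat.python import cp_model


def scaled_average_sketch() -> None:
    """Computes an integer-scaled average cost, as in the grouping example."""
    model = cp_model.CpModel()
    scaling = 1000
    costs = [24, 10, 7]  # Illustrative task costs.

    # Number of workers in the group (at least one so the division is defined).
    n = model.new_int_var(1, 3, "num_workers")
    total = model.new_int_var(0, sum(costs) * scaling, "scaled_total")
    model.add(total == sum(costs) * scaling)

    # average == total // n, computed on the scaled total to keep precision.
    average = model.new_int_var(0, sum(costs) * scaling, "scaled_average")
    model.add_division_equality(average, total, n)

    model.minimize(average)
    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("average cost:", solver.value(average) / scaling)


scaled_average_sketch()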
diff --git a/examples/python/test_scheduling_sat.py b/examples/python/test_scheduling_sat.py
index fdec2ba13bd..62d037d49bb 100644
--- a/examples/python/test_scheduling_sat.py
+++ b/examples/python/test_scheduling_sat.py
@@ -45,8 +45,8 @@
def build_data() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
- """Build the data frame."""
- tests_str = """
+ """Build the data frame."""
+ tests_str = """
Name Operator TestTime AveragePower
T1 O1 300 200
T2 O1 150 40
@@ -55,24 +55,24 @@ def build_data() -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
T5 O3 210 140
"""
- operators_str = """
+ operators_str = """
Operator Supply
O1 S1
O2 S2
O3 S2
"""
- supplies_str = """
+ supplies_str = """
Supply MaxAllowedPower
S1 230
S2 210
"""
- tests_data = pd.read_table(io.StringIO(tests_str), sep=r"\s+")
- operators_data = pd.read_table(io.StringIO(operators_str), sep=r"\s+")
- supplies_data = pd.read_table(io.StringIO(supplies_str), sep=r"\s+")
+ tests_data = pd.read_table(io.StringIO(tests_str), sep=r"\s+")
+ operators_data = pd.read_table(io.StringIO(operators_str), sep=r"\s+")
+ supplies_data = pd.read_table(io.StringIO(supplies_str), sep=r"\s+")
- return (tests_data, operators_data, supplies_data)
+ return (tests_data, operators_data, supplies_data)
def solve(
@@ -80,97 +80,97 @@ def solve(
operator_data: pd.DataFrame,
supplies_data: pd.DataFrame,
) -> None:
- """Solve the scheduling of tests problem."""
-
- # Parses data.
- operator_to_supply: Dict[str, str] = {}
- for _, row in operator_data.iterrows():
- operator_to_supply[row["Operator"]] = row["Supply"]
-
- supply_to_max_power: Dict[str, int] = {}
- for _, row in supplies_data.iterrows():
- supply_to_max_power[row["Supply"]] = row["MaxAllowedPower"]
-
- horizon = tests_data["TestTime"].sum()
-
- # OR-Tools model.
- model = cp_model.CpModel()
-
- # Create containers.
- tests_per_supply: Dict[str, Tuple[list[cp_model.IntervalVar], list[int]]] = {}
- test_supply: Dict[str, str] = {}
- test_starts: Dict[str, cp_model.IntVar] = {}
- test_durations: Dict[str, int] = {}
- test_powers: Dict[str, int] = {}
- all_ends = []
-
- # Creates intervals.
- for _, row in tests_data.iterrows():
- name: str = row["Name"]
- operator: str = row["Operator"]
- test_time: int = row["TestTime"]
- average_power: int = row["AveragePower"]
- supply: str = operator_to_supply[operator]
-
- start = model.new_int_var(0, horizon - test_time, f"start_{name}")
- interval = model.new_fixed_size_interval_var(
- start, test_time, f"interval_{name}"
- )
-
- # Bookkeeping.
- test_starts[name] = start
- test_durations[name] = test_time
- test_powers[name] = average_power
- test_supply[name] = supply
- if supply not in tests_per_supply.keys():
- tests_per_supply[supply] = ([], [])
- tests_per_supply[supply][0].append(interval)
- tests_per_supply[supply][1].append(average_power)
- all_ends.append(start + test_time)
-
- # Create supply cumulative constraints.
- for supply, (intervals, demands) in tests_per_supply.items():
- model.add_cumulative(intervals, demands, supply_to_max_power[supply])
-
- # Objective.
- makespan = model.new_int_var(0, horizon, "makespan")
- for end in all_ends:
- model.add(makespan >= end)
- model.minimize(makespan)
-
- # Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- status = solver.solve(model)
-
- # Report solution.
- if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
- print(f"Makespan = {solver.value(makespan)}")
- for name, start in test_starts.items():
- print(
- f"{name}: start:{solver.value(start)} duration:{test_durations[name]}"
- f" power:{test_powers[name]} on supply {test_supply[name]}"
- )
+ """Solve the scheduling of tests problem."""
+
+ # Parses data.
+ operator_to_supply: Dict[str, str] = {}
+ for _, row in operator_data.iterrows():
+ operator_to_supply[row["Operator"]] = row["Supply"]
+
+ supply_to_max_power: Dict[str, int] = {}
+ for _, row in supplies_data.iterrows():
+ supply_to_max_power[row["Supply"]] = row["MaxAllowedPower"]
+
+ horizon = tests_data["TestTime"].sum()
+
+ # OR-Tools model.
+ model = cp_model.CpModel()
+
+ # Create containers.
+ tests_per_supply: Dict[str, Tuple[list[cp_model.IntervalVar], list[int]]] = {}
+ test_supply: Dict[str, str] = {}
+ test_starts: Dict[str, cp_model.IntVar] = {}
+ test_durations: Dict[str, int] = {}
+ test_powers: Dict[str, int] = {}
+ all_ends = []
+
+ # Creates intervals.
+ for _, row in tests_data.iterrows():
+ name: str = row["Name"]
+ operator: str = row["Operator"]
+ test_time: int = row["TestTime"]
+ average_power: int = row["AveragePower"]
+ supply: str = operator_to_supply[operator]
+
+ start = model.new_int_var(0, horizon - test_time, f"start_{name}")
+ interval = model.new_fixed_size_interval_var(
+ start, test_time, f"interval_{name}"
+ )
+
+ # Bookkeeping.
+ test_starts[name] = start
+ test_durations[name] = test_time
+ test_powers[name] = average_power
+ test_supply[name] = supply
+ if supply not in tests_per_supply.keys():
+ tests_per_supply[supply] = ([], [])
+ tests_per_supply[supply][0].append(interval)
+ tests_per_supply[supply][1].append(average_power)
+ all_ends.append(start + test_time)
+
+ # Create supply cumulative constraints.
+ for supply, (intervals, demands) in tests_per_supply.items():
+ model.add_cumulative(intervals, demands, supply_to_max_power[supply])
+
+ # Objective.
+ makespan = model.new_int_var(0, horizon, "makespan")
+ for end in all_ends:
+ model.add(makespan >= end)
+ model.minimize(makespan)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ status = solver.solve(model)
+
+ # Report solution.
+ if status == cp_model.OPTIMAL or status == cp_model.FEASIBLE:
+ print(f"Makespan = {solver.value(makespan)}")
+ for name, start in test_starts.items():
+ print(
+ f"{name}: start:{solver.value(start)} duration:{test_durations[name]}"
+ f" power:{test_powers[name]} on supply {test_supply[name]}"
+ )
def main(argv: Sequence[str]) -> None:
- """Builds the data and solve the scheduling problem."""
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
+    """Builds the data and solves the scheduling problem."""
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
- tests_data, operators_data, supplies_data = build_data()
- print("Tests data")
- print(tests_data)
- print()
- print("Operators data")
- print(operators_data)
- print()
- print("Supplies data")
- print(supplies_data)
+ tests_data, operators_data, supplies_data = build_data()
+ print("Tests data")
+ print(tests_data)
+ print()
+ print("Operators data")
+ print(operators_data)
+ print()
+ print("Supplies data")
+ print(supplies_data)
- solve(tests_data, operators_data, supplies_data)
+ solve(tests_data, operators_data, supplies_data)
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
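test_scheduling_sat above caps the instantaneous power drawn from each supply with a single add_cumulative over fixed-size intervals. Here is a reduced sketch of that resource constraint with two made-up tests sharing one supply; the durations, powers, and the 230 cap are placeholders echoing the data table.

from ortools.sat.python import cp_model


def cumulative_power_sketch() -> None:
    """Schedules two tests so their summed power never exceeds the supply cap."""
    model = cp_model.CpModel()
    horizon = 450
    durations = [300, 150]
    powers = [200, 40]
    max_power = 230

    starts = []
    intervals = []
    for i, duration in enumerate(durations):
        start = model.new_int_var(0, horizon - duration, f"start_{i}")
        starts.append(start)
        intervals.append(
            model.new_fixed_size_interval_var(start, duration, f"interval_{i}")
        )

    # At any time, the summed power of the running tests stays under max_power.
    model.add_cumulative(intervals, powers, max_power)

    # Minimize the makespan.
    makespan = model.new_int_var(0, horizon, "makespan")
    for start, duration in zip(starts, durations):
        model.add(makespan >= start + duration)
    model.minimize(makespan)

    solver = cp_model.CpSolver()
    if solver.solve(model) == cp_model.OPTIMAL:
        print("makespan:", solver.value(makespan))


cumulative_power_sketch()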
diff --git a/examples/python/transit_time.py b/examples/python/transit_time.py
index bfc5551e2cc..28d06594069 100755
--- a/examples/python/transit_time.py
+++ b/examples/python/transit_time.py
@@ -14,211 +14,243 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""Display Transit Time
- Distances are in meters and time in minutes.
+Distances are in meters and time in minutes.
- Manhattan average block: 750ft x 264ft -> 228m x 80m
- src: https://nyti.ms/2GDoRIe "NY Times: Know Your distance"
- here we use: 114m x 80m city block
+Manhattan average block: 750ft x 264ft -> 228m x 80m
+src: https://nyti.ms/2GDoRIe "NY Times: Know Your distance"
+here we use: 114m x 80m city block
"""
from ortools.constraint_solver import pywrapcp
-from ortools.constraint_solver import routing_enums_pb2
###########################
# Problem Data Definition #
###########################
-class Vehicle():
- """Stores the property of a vehicle"""
-
- def __init__(self):
- """Initializes the vehicle properties"""
- self._capacity = 15
- # Travel speed: 5km/h to convert in m/min
- self._speed = 5 * 60 / 3.6
-
- @property
- def speed(self):
- """Gets the average travel speed of a vehicle"""
- return self._speed
-
-
-class CityBlock():
- """City block definition"""
-
- @property
- def width(self):
- """Gets Block size West to East"""
- return 228 / 2
-
- @property
- def height(self):
- """Gets Block size North to South"""
- return 80
-
-
-class DataProblem():
- """Stores the data for the problem"""
-
- def __init__(self):
- """Initializes the data for the problem"""
- self._vehicle = Vehicle()
-
- # Locations in block unit
- locations = \
- [(4, 4), # depot
- (2, 0), (8, 0), # row 0
- (0, 1), (1, 1),
- (5, 2), (7, 2),
- (3, 3), (6, 3),
- (5, 5), (8, 5),
- (1, 6), (2, 6),
- (3, 7), (6, 7),
- (0, 8), (7, 8)]
- # locations in meters using the city block dimension
- city_block = CityBlock()
- self._locations = [(loc[0] * city_block.width,
- loc[1] * city_block.height) for loc in locations]
-
- self._depot = 0
-
- self._demands = \
- [0, # depot
- 1, 1, # 1, 2
- 2, 4, # 3, 4
- 2, 4, # 5, 6
- 8, 8, # 7, 8
- 1, 2, # 9,10
- 1, 2, # 11,12
- 4, 4, # 13, 14
- 8, 8] # 15, 16
-
- self._time_windows = \
- [(0, 0),
- (75, 85), (75, 85), # 1, 2
- (60, 70), (45, 55), # 3, 4
- (0, 8), (50, 60), # 5, 6
- (0, 10), (10, 20), # 7, 8
- (0, 10), (75, 85), # 9, 10
- (85, 95), (5, 15), # 11, 12
- (15, 25), (10, 20), # 13, 14
- (45, 55), (30, 40)] # 15, 16
-
- @property
- def vehicle(self):
- """Gets a vehicle"""
- return self._vehicle
-
- @property
- def locations(self):
- """Gets locations"""
- return self._locations
-
- @property
- def num_locations(self):
- """Gets number of locations"""
- return len(self.locations)
-
- @property
- def depot(self):
- """Gets depot location index"""
- return self._depot
-
- @property
- def demands(self):
- """Gets demands at each location"""
- return self._demands
-
- @property
- def time_per_demand_unit(self):
- """Gets the time (in min) to load a demand"""
- return 5 # 5 minutes/unit
-
- @property
- def time_windows(self):
- """Gets (start time, end time) for each locations"""
- return self._time_windows
+class Vehicle:
+    """Stores the properties of a vehicle"""
+
+ def __init__(self):
+ """Initializes the vehicle properties"""
+ self._capacity = 15
+        # Travel speed: 5 km/h converted to m/min
+ self._speed = 5 * 60 / 3.6
+
+ @property
+ def speed(self):
+ """Gets the average travel speed of a vehicle"""
+ return self._speed
+
+
+class CityBlock:
+ """City block definition"""
+
+ @property
+ def width(self):
+ """Gets Block size West to East"""
+ return 228 / 2
+
+ @property
+ def height(self):
+ """Gets Block size North to South"""
+ return 80
+
+
+class DataProblem:
+ """Stores the data for the problem"""
+
+ def __init__(self):
+ """Initializes the data for the problem"""
+ self._vehicle = Vehicle()
+
+ # Locations in block unit
+ locations = [
+ (4, 4), # depot
+ (2, 0),
+ (8, 0), # row 0
+ (0, 1),
+ (1, 1),
+ (5, 2),
+ (7, 2),
+ (3, 3),
+ (6, 3),
+ (5, 5),
+ (8, 5),
+ (1, 6),
+ (2, 6),
+ (3, 7),
+ (6, 7),
+ (0, 8),
+ (7, 8),
+ ]
+ # locations in meters using the city block dimension
+ city_block = CityBlock()
+ self._locations = [
+ (loc[0] * city_block.width, loc[1] * city_block.height)
+ for loc in locations
+ ]
+
+ self._depot = 0
+
+ self._demands = [
+ 0, # depot
+ 1,
+ 1, # 1, 2
+ 2,
+ 4, # 3, 4
+ 2,
+ 4, # 5, 6
+ 8,
+ 8, # 7, 8
+ 1,
+ 2, # 9,10
+ 1,
+ 2, # 11,12
+ 4,
+ 4, # 13, 14
+ 8,
+ 8,
+ ] # 15, 16
+
+ self._time_windows = [
+ (0, 0),
+ (75, 85),
+ (75, 85), # 1, 2
+ (60, 70),
+ (45, 55), # 3, 4
+ (0, 8),
+ (50, 60), # 5, 6
+ (0, 10),
+ (10, 20), # 7, 8
+ (0, 10),
+ (75, 85), # 9, 10
+ (85, 95),
+ (5, 15), # 11, 12
+ (15, 25),
+ (10, 20), # 13, 14
+ (45, 55),
+ (30, 40),
+ ] # 15, 16
+
+ @property
+ def vehicle(self):
+ """Gets a vehicle"""
+ return self._vehicle
+
+ @property
+ def locations(self):
+ """Gets locations"""
+ return self._locations
+
+ @property
+ def num_locations(self):
+ """Gets number of locations"""
+ return len(self.locations)
+
+ @property
+ def depot(self):
+ """Gets depot location index"""
+ return self._depot
+
+ @property
+ def demands(self):
+ """Gets demands at each location"""
+ return self._demands
+
+ @property
+ def time_per_demand_unit(self):
+ """Gets the time (in min) to load a demand"""
+ return 5 # 5 minutes/unit
+
+ @property
+ def time_windows(self):
+ """Gets (start time, end time) for each locations"""
+ return self._time_windows
#######################
# Problem Constraints #
#######################
def manhattan_distance(position_1, position_2):
- """Computes the Manhattan distance between two points"""
- return (
- abs(position_1[0] - position_2[0]) + abs(position_1[1] - position_2[1]))
+ """Computes the Manhattan distance between two points"""
+ return abs(position_1[0] - position_2[0]) + abs(position_1[1] - position_2[1])
class CreateTimeEvaluator(object):
- """Creates callback to get total times between locations."""
-
- @staticmethod
- def service_time(data, node):
- """Gets the service time for the specified location."""
- return data.demands[node] * data.time_per_demand_unit
-
- @staticmethod
- def travel_time(data, from_node, to_node):
- """Gets the travel times between two locations."""
+ """Creates callback to get total times between locations."""
+
+ @staticmethod
+ def service_time(data, node):
+ """Gets the service time for the specified location."""
+ return data.demands[node] * data.time_per_demand_unit
+
+ @staticmethod
+ def travel_time(data, from_node, to_node):
+ """Gets the travel times between two locations."""
+ if from_node == to_node:
+ travel_time = 0
+ else:
+ travel_time = (
+ manhattan_distance(data.locations[from_node], data.locations[to_node])
+ / data.vehicle.speed
+ )
+ return travel_time
+
+ def __init__(self, data):
+ """Initializes the total time matrix."""
+ self._total_time = {}
+        # Precompute total times so the time callback runs in O(1).
+ for from_node in range(data.num_locations):
+ self._total_time[from_node] = {}
+ for to_node in range(data.num_locations):
if from_node == to_node:
- travel_time = 0
+ self._total_time[from_node][to_node] = 0
else:
- travel_time = manhattan_distance(data.locations[
- from_node], data.locations[to_node]) / data.vehicle.speed
- return travel_time
-
- def __init__(self, data):
- """Initializes the total time matrix."""
- self._total_time = {}
- # precompute total time to have time callback in O(1)
- for from_node in range(data.num_locations):
- self._total_time[from_node] = {}
- for to_node in range(data.num_locations):
- if from_node == to_node:
- self._total_time[from_node][to_node] = 0
- else:
- self._total_time[from_node][to_node] = int(
- self.service_time(data, from_node) + self.travel_time(
- data, from_node, to_node))
-
- def time_evaluator(self, from_node, to_node):
- """Returns the total time between the two nodes"""
- return self._total_time[from_node][to_node]
+ self._total_time[from_node][to_node] = int(
+ self.service_time(data, from_node)
+ + self.travel_time(data, from_node, to_node)
+ )
+
+ def time_evaluator(self, from_node, to_node):
+ """Returns the total time between the two nodes"""
+ return self._total_time[from_node][to_node]
def print_transit_time(route, time_evaluator):
- """Print transit time between nodes of a route"""
- total_time = 0
- for i, j in route:
- total_time += time_evaluator(i, j)
- print('{0} -> {1}: {2}min'.format(i, j, time_evaluator(i, j)))
- print('Total time: {0}min\n'.format(total_time))
+ """Print transit time between nodes of a route"""
+ total_time = 0
+ for i, j in route:
+ total_time += time_evaluator(i, j)
+ print('{0} -> {1}: {2}min'.format(i, j, time_evaluator(i, j)))
+ print('Total time: {0}min\n'.format(total_time))
########
# Main #
########
def main():
- """Entry point of the program"""
- # Instantiate the data problem.
- data = DataProblem()
+ """Entry point of the program"""
+ # Instantiate the data problem.
+ data = DataProblem()
- # Print Transit Time
- time_evaluator = CreateTimeEvaluator(data).time_evaluator
- print('Route 0:')
- print_transit_time([[0, 5], [5, 8], [8, 6], [6, 2], [2, 0]], time_evaluator)
+ # Print Transit Time
+ time_evaluator = CreateTimeEvaluator(data).time_evaluator
+ print('Route 0:')
+ print_transit_time([[0, 5], [5, 8], [8, 6], [6, 2], [2, 0]], time_evaluator)
- print('Route 1:')
- print_transit_time([[0, 9], [9, 14], [14, 16], [16, 10], [10, 0]],
- time_evaluator)
+ print('Route 1:')
+ print_transit_time(
+ [[0, 9], [9, 14], [14, 16], [16, 10], [10, 0]], time_evaluator
+ )
- print('Route 2:')
- print_transit_time([[0, 12], [12, 13], [13, 15], [15, 11], [11, 0]],
- time_evaluator)
+ print('Route 2:')
+ print_transit_time(
+ [[0, 12], [12, 13], [13, 15], [15, 11], [11, 0]], time_evaluator
+ )
- print('Route 3:')
- print_transit_time([[0, 7], [7, 4], [4, 3], [3, 1], [1, 0]], time_evaluator)
+ print('Route 3:')
+ print_transit_time([[0, 7], [7, 4], [4, 3], [3, 1], [1, 0]], time_evaluator)
if __name__ == '__main__':
- main()
+ main()
diff --git a/examples/python/tsp_sat.py b/examples/python/tsp_sat.py
index ade7306cc43..1cc7fbf873c 100644
--- a/examples/python/tsp_sat.py
+++ b/examples/python/tsp_sat.py
@@ -63,65 +63,65 @@
def main():
- """Entry point of the program."""
- num_nodes = len(DISTANCE_MATRIX)
- all_nodes = range(num_nodes)
- print("Num nodes =", num_nodes)
-
- # Model.
- model = cp_model.CpModel()
-
- obj_vars = []
- obj_coeffs = []
-
- # Create the circuit constraint.
- arcs = []
- arc_literals = {}
+ """Entry point of the program."""
+ num_nodes = len(DISTANCE_MATRIX)
+ all_nodes = range(num_nodes)
+ print("Num nodes =", num_nodes)
+
+ # Model.
+ model = cp_model.CpModel()
+
+ obj_vars = []
+ obj_coeffs = []
+
+ # Create the circuit constraint.
+ arcs = []
+ arc_literals = {}
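+    # arc_literals[i, j] is true iff node j immediately follows node i in the tour.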
+ for i in all_nodes:
+ for j in all_nodes:
+ if i == j:
+ continue
+
+ lit = model.new_bool_var("%i follows %i" % (j, i))
+ arcs.append((i, j, lit))
+ arc_literals[i, j] = lit
+
+ obj_vars.append(lit)
+ obj_coeffs.append(DISTANCE_MATRIX[i][j])
+
+ model.add_circuit(arcs)
+
+    # Minimize the weighted sum of arcs.
+ model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
+
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ solver.parameters.log_search_progress = True
+ # To benefit from the linearization of the circuit constraint.
+ solver.parameters.linearization_level = 2
+
+ solver.solve(model)
+ print(solver.response_stats())
+
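+    # Reconstruct the tour by following the chosen arcs starting from node 0.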
+ current_node = 0
+ str_route = "%i" % current_node
+ route_is_finished = False
+ route_distance = 0
+ while not route_is_finished:
for i in all_nodes:
- for j in all_nodes:
- if i == j:
- continue
-
- lit = model.new_bool_var("%i follows %i" % (j, i))
- arcs.append((i, j, lit))
- arc_literals[i, j] = lit
-
- obj_vars.append(lit)
- obj_coeffs.append(DISTANCE_MATRIX[i][j])
-
- model.add_circuit(arcs)
-
- # Minimize weighted sum of arcs. Because this s
- model.minimize(sum(obj_vars[i] * obj_coeffs[i] for i in range(len(obj_vars))))
-
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- solver.parameters.log_search_progress = True
- # To benefit from the linearization of the circuit constraint.
- solver.parameters.linearization_level = 2
-
- solver.solve(model)
- print(solver.response_stats())
-
- current_node = 0
- str_route = "%i" % current_node
- route_is_finished = False
- route_distance = 0
- while not route_is_finished:
- for i in all_nodes:
- if i == current_node:
- continue
- if solver.boolean_value(arc_literals[current_node, i]):
- str_route += " -> %i" % i
- route_distance += DISTANCE_MATRIX[current_node][i]
- current_node = i
- if current_node == 0:
- route_is_finished = True
- break
-
- print("Route:", str_route)
- print("Travelled distance:", route_distance)
+ if i == current_node:
+ continue
+ if solver.boolean_value(arc_literals[current_node, i]):
+ str_route += " -> %i" % i
+ route_distance += DISTANCE_MATRIX[current_node][i]
+ current_node = i
+ if current_node == 0:
+ route_is_finished = True
+ break
+
+ print("Route:", str_route)
+ print("Travelled distance:", route_distance)
if __name__ == "__main__":
- main()
+ main()
diff --git a/examples/python/vendor_scheduling_sat.py b/examples/python/vendor_scheduling_sat.py
index 066037d3500..f0478cf51e1 100644
--- a/examples/python/vendor_scheduling_sat.py
+++ b/examples/python/vendor_scheduling_sat.py
@@ -20,148 +20,148 @@
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
-
- def __init__(
- self,
- num_vendors,
- num_hours,
- possible_schedules,
- selected_schedules,
- hours_stat,
- min_vendors,
- ):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
- self.__num_vendors = num_vendors
- self.__num_hours = num_hours
- self.__possible_schedules = possible_schedules
- self.__selected_schedules = selected_schedules
- self.__hours_stat = hours_stat
- self.__min_vendors = min_vendors
-
- def on_solution_callback(self):
- """Called at each new solution."""
- self.__solution_count += 1
- print("Solution %i: ", self.__solution_count)
- print(" min vendors:", self.__min_vendors)
- for i in range(self.__num_vendors):
- print(
- " - vendor %i: " % i,
- self.__possible_schedules[self.value(self.__selected_schedules[i])],
- )
- print()
-
- for j in range(self.__num_hours):
- print(" - # workers on day%2i: " % j, end=" ")
- print(self.value(self.__hours_stat[j]), end=" ")
- print()
- print()
-
- def solution_count(self):
- """Returns the number of solution found."""
- return self.__solution_count
+ """Print intermediate solutions."""
+
+ def __init__(
+ self,
+ num_vendors,
+ num_hours,
+ possible_schedules,
+ selected_schedules,
+ hours_stat,
+ min_vendors,
+ ):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
+ self.__num_vendors = num_vendors
+ self.__num_hours = num_hours
+ self.__possible_schedules = possible_schedules
+ self.__selected_schedules = selected_schedules
+ self.__hours_stat = hours_stat
+ self.__min_vendors = min_vendors
+
+ def on_solution_callback(self):
+ """Called at each new solution."""
+ self.__solution_count += 1
+ print("Solution %i: ", self.__solution_count)
+ print(" min vendors:", self.__min_vendors)
+ for i in range(self.__num_vendors):
+ print(
+ " - vendor %i: " % i,
+ self.__possible_schedules[self.value(self.__selected_schedules[i])],
+ )
+ print()
+
+ for j in range(self.__num_hours):
+ print(" - # workers on day%2i: " % j, end=" ")
+ print(self.value(self.__hours_stat[j]), end=" ")
+ print()
+ print()
+
+ def solution_count(self):
+ """Returns the number of solution found."""
+ return self.__solution_count
def vendor_scheduling_sat() -> None:
- """Create the shift scheduling model and solve it."""
- # Create the model.
- model = cp_model.CpModel()
-
- #
- # data
- #
- num_vendors = 9
- num_hours = 10
- num_work_types = 1
-
- traffic = [100, 500, 100, 200, 320, 300, 200, 220, 300, 120]
- max_traffic_per_vendor = 100
-
- # Last columns are :
- # index_of_the_schedule, sum of worked hours (per work type).
- # The index is useful for branching.
- possible_schedules = [
- [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 8],
- [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 4],
- [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 2, 5],
- [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 4],
- [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 4, 3],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0],
- ]
-
- num_possible_schedules = len(possible_schedules)
- selected_schedules = []
- vendors_stat = []
- hours_stat = []
-
- # Auxiliary data
- min_vendors = [t // max_traffic_per_vendor for t in traffic]
- all_vendors = range(num_vendors)
- all_hours = range(num_hours)
-
- #
- # Declare variables
- #
- x = {}
-
- for v in all_vendors:
- tmp = []
- for h in all_hours:
- x[v, h] = model.new_int_var(0, num_work_types, "x[%i,%i]" % (v, h))
- tmp.append(x[v, h])
- selected_schedule = model.new_int_var(
- 0, num_possible_schedules - 1, "s[%i]" % v
- )
- hours = model.new_int_var(0, num_hours, "h[%i]" % v)
- selected_schedules.append(selected_schedule)
- vendors_stat.append(hours)
- tmp.append(selected_schedule)
- tmp.append(hours)
-
- model.add_allowed_assignments(tmp, possible_schedules)
-
- #
- # Statistics and constraints for each hour
- #
+ """Create the shift scheduling model and solve it."""
+ # Create the model.
+ model = cp_model.CpModel()
+
+ #
+ # data
+ #
+ num_vendors = 9
+ num_hours = 10
+ num_work_types = 1
+
+ traffic = [100, 500, 100, 200, 320, 300, 200, 220, 300, 120]
+ max_traffic_per_vendor = 100
+
+    # The last two columns are:
+ # index_of_the_schedule, sum of worked hours (per work type).
+ # The index is useful for branching.
+ possible_schedules = [
+ [1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 8],
+ [1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 4],
+ [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 2, 5],
+ [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3, 4],
+ [1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 4, 3],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0],
+ ]
+
+ num_possible_schedules = len(possible_schedules)
+ selected_schedules = []
+ vendors_stat = []
+ hours_stat = []
+
+ # Auxiliary data
+ min_vendors = [t // max_traffic_per_vendor for t in traffic]
+ all_vendors = range(num_vendors)
+ all_hours = range(num_hours)
+
+ #
+ # Declare variables
+ #
+ x = {}
+
+ for v in all_vendors:
+ tmp = []
for h in all_hours:
- workers = model.new_int_var(0, 1000, "workers[%i]" % h)
- model.add(workers == sum(x[v, h] for v in all_vendors))
- hours_stat.append(workers)
- model.add(workers * max_traffic_per_vendor >= traffic[h])
-
- #
- # Redundant constraint: sort selected_schedules
- #
- for v in range(num_vendors - 1):
- model.add(selected_schedules[v] <= selected_schedules[v + 1])
-
- # Solve model.
- solver = cp_model.CpSolver()
- solver.parameters.enumerate_all_solutions = True
- solution_printer = SolutionPrinter(
- num_vendors,
- num_hours,
- possible_schedules,
- selected_schedules,
- hours_stat,
- min_vendors,
+ x[v, h] = model.new_int_var(0, num_work_types, "x[%i,%i]" % (v, h))
+ tmp.append(x[v, h])
+ selected_schedule = model.new_int_var(
+ 0, num_possible_schedules - 1, "s[%i]" % v
)
- status = solver.solve(model, solution_printer)
- print("Status = %s" % solver.status_name(status))
-
- print("Statistics")
- print(" - conflicts : %i" % solver.num_conflicts)
- print(" - branches : %i" % solver.num_branches)
- print(" - wall time : %f s" % solver.wall_time)
- print(" - number of solutions found: %i" % solution_printer.solution_count())
+ hours = model.new_int_var(0, num_hours, "h[%i]" % v)
+ selected_schedules.append(selected_schedule)
+ vendors_stat.append(hours)
+ tmp.append(selected_schedule)
+ tmp.append(hours)
+
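+        # Each vendor's row (hourly assignments, schedule index, worked hours) must match one allowed schedule.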
+ model.add_allowed_assignments(tmp, possible_schedules)
+
+ #
+ # Statistics and constraints for each hour
+ #
+ for h in all_hours:
+ workers = model.new_int_var(0, 1000, "workers[%i]" % h)
+ model.add(workers == sum(x[v, h] for v in all_vendors))
+ hours_stat.append(workers)
+ model.add(workers * max_traffic_per_vendor >= traffic[h])
+
+ #
+ # Redundant constraint: sort selected_schedules
+ #
+ for v in range(num_vendors - 1):
+ model.add(selected_schedules[v] <= selected_schedules[v + 1])
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ solver.parameters.enumerate_all_solutions = True
+ solution_printer = SolutionPrinter(
+ num_vendors,
+ num_hours,
+ possible_schedules,
+ selected_schedules,
+ hours_stat,
+ min_vendors,
+ )
+ status = solver.solve(model, solution_printer)
+ print("Status = %s" % solver.status_name(status))
+
+ print("Statistics")
+ print(" - conflicts : %i" % solver.num_conflicts)
+ print(" - branches : %i" % solver.num_branches)
+ print(" - wall time : %f s" % solver.wall_time)
+ print(" - number of solutions found: %i" % solution_printer.solution_count())
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- vendor_scheduling_sat()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ vendor_scheduling_sat()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/wedding_optimal_chart_sat.py b/examples/python/wedding_optimal_chart_sat.py
index fc95b0f04ef..c582a497274 100644
--- a/examples/python/wedding_optimal_chart_sat.py
+++ b/examples/python/wedding_optimal_chart_sat.py
@@ -43,208 +43,209 @@
class WeddingChartPrinter(cp_model.CpSolverSolutionCallback):
- """Print intermediate solutions."""
-
- def __init__(self, seats, names, num_tables, num_guests):
- cp_model.CpSolverSolutionCallback.__init__(self)
- self.__solution_count = 0
- self.__start_time = time.time()
- self.__seats = seats
- self.__names = names
- self.__num_tables = num_tables
- self.__num_guests = num_guests
-
- def on_solution_callback(self):
- current_time = time.time()
- objective = self.objective_value
- print(
- "Solution %i, time = %f s, objective = %i"
- % (self.__solution_count, current_time - self.__start_time, objective)
- )
- self.__solution_count += 1
+ """Print intermediate solutions."""
+
+ def __init__(self, seats, names, num_tables, num_guests):
+ cp_model.CpSolverSolutionCallback.__init__(self)
+ self.__solution_count = 0
+ self.__start_time = time.time()
+ self.__seats = seats
+ self.__names = names
+ self.__num_tables = num_tables
+ self.__num_guests = num_guests
+
+ def on_solution_callback(self):
+ current_time = time.time()
+ objective = self.objective_value
+ print(
+ "Solution %i, time = %f s, objective = %i"
+ % (self.__solution_count, current_time - self.__start_time, objective)
+ )
+ self.__solution_count += 1
- for t in range(self.__num_tables):
- print("Table %d: " % t)
- for g in range(self.__num_guests):
- if self.value(self.__seats[(t, g)]):
- print(" " + self.__names[g])
+ for t in range(self.__num_tables):
+ print("Table %d: " % t)
+ for g in range(self.__num_guests):
+ if self.value(self.__seats[(t, g)]):
+ print(" " + self.__names[g])
- def num_solutions(self) -> int:
- return self.__solution_count
+ def num_solutions(self) -> int:
+ return self.__solution_count
def build_data():
- """Build the data model."""
- # Easy problem (from the paper)
- # num_tables = 2
- # table_capacity = 10
- # min_known_neighbors = 1
-
- # Slightly harder problem (also from the paper)
- num_tables = 5
- table_capacity = 4
- min_known_neighbors = 1
-
- # Connection matrix: who knows who, and how strong
- # is the relation
- connections = [
- [1, 50, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [50, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 1, 50, 1, 1, 1, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 50, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1, 50, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 1, 1, 50, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1, 1, 1, 50, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 1, 1, 1, 1, 50, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [1, 1, 10, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 50, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
- ]
-
- # Names of the guests. B: Bride side, G: Groom side
- names = [
- "Deb (B)",
- "John (B)",
- "Martha (B)",
- "Travis (B)",
- "Allan (B)",
- "Lois (B)",
- "Jayne (B)",
- "Brad (B)",
- "Abby (B)",
- "Mary Helen (G)",
- "Lee (G)",
- "Annika (G)",
- "Carl (G)",
- "Colin (G)",
- "Shirley (G)",
- "DeAnn (G)",
- "Lori (G)",
- ]
- return num_tables, table_capacity, min_known_neighbors, connections, names
+ """Build the data model."""
+ # Easy problem (from the paper)
+ # num_tables = 2
+ # table_capacity = 10
+ # min_known_neighbors = 1
+
+ # Slightly harder problem (also from the paper)
+ num_tables = 5
+ table_capacity = 4
+ min_known_neighbors = 1
+
+    # Connection matrix: who knows whom, and how strong
+    # the relationship is.
+ connections = [
+ [1, 50, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [50, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 50, 1, 1, 1, 1, 10, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 50, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 50, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 50, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 1, 50, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 1, 1, 1, 1, 50, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [1, 1, 10, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 50, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 50, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1],
+ ]
+
+ # Names of the guests. B: Bride side, G: Groom side
+ names = [
+ "Deb (B)",
+ "John (B)",
+ "Martha (B)",
+ "Travis (B)",
+ "Allan (B)",
+ "Lois (B)",
+ "Jayne (B)",
+ "Brad (B)",
+ "Abby (B)",
+ "Mary Helen (G)",
+ "Lee (G)",
+ "Annika (G)",
+ "Carl (G)",
+ "Colin (G)",
+ "Shirley (G)",
+ "DeAnn (G)",
+ "Lori (G)",
+ ]
+ return num_tables, table_capacity, min_known_neighbors, connections, names
def solve_with_discrete_model() -> None:
- """Discrete approach."""
- num_tables, table_capacity, min_known_neighbors, connections, names = build_data()
-
- num_guests = len(connections)
-
- all_tables = range(num_tables)
- all_guests = range(num_guests)
-
- # Create the cp model.
- model = cp_model.CpModel()
-
- #
- # Decision variables
- #
- seats = {}
- for t in all_tables:
- for g in all_guests:
- seats[(t, g)] = model.new_bool_var("guest %i seats on table %i" % (g, t))
-
- colocated = {}
- for g1 in range(num_guests - 1):
- for g2 in range(g1 + 1, num_guests):
- colocated[(g1, g2)] = model.new_bool_var(
- "guest %i seats with guest %i" % (g1, g2)
- )
-
- same_table = {}
- for g1 in range(num_guests - 1):
- for g2 in range(g1 + 1, num_guests):
- for t in all_tables:
- same_table[(g1, g2, t)] = model.new_bool_var(
- "guest %i seats with guest %i on table %i" % (g1, g2, t)
- )
-
- # Objective
- model.maximize(
- sum(
- connections[g1][g2] * colocated[g1, g2]
- for g1 in range(num_guests - 1)
- for g2 in range(g1 + 1, num_guests)
- if connections[g1][g2] > 0
- )
- )
+ """Discrete approach."""
+ num_tables, table_capacity, min_known_neighbors, connections, names = (
+ build_data()
+ )
- #
- # Constraints
- #
+ num_guests = len(connections)
- # Everybody seats at one table.
- for g in all_guests:
- model.add(sum(seats[(t, g)] for t in all_tables) == 1)
-
- # Tables have a max capacity.
- for t in all_tables:
- model.add(sum(seats[(t, g)] for g in all_guests) <= table_capacity)
-
- # Link colocated with seats
- for g1 in range(num_guests - 1):
- for g2 in range(g1 + 1, num_guests):
- for t in all_tables:
- # Link same_table and seats.
- model.add_bool_or(
- [
- ~seats[(t, g1)],
- ~seats[(t, g2)],
- same_table[(g1, g2, t)],
- ]
- )
- model.add_implication(same_table[(g1, g2, t)], seats[(t, g1)])
- model.add_implication(same_table[(g1, g2, t)], seats[(t, g2)])
-
- # Link colocated and same_table.
- model.add(
- sum(same_table[(g1, g2, t)] for t in all_tables) == colocated[(g1, g2)]
- )
-
- # Min known neighbors rule.
+ all_tables = range(num_tables)
+ all_guests = range(num_guests)
+
+ # Create the cp model.
+ model = cp_model.CpModel()
+
+ #
+ # Decision variables
+ #
+ seats = {}
+ for t in all_tables:
for g in all_guests:
- model.add(
- sum(
- same_table[(g, g2, t)]
- for g2 in range(g + 1, num_guests)
- for t in all_tables
- if connections[g][g2] > 0
- )
- + sum(
- same_table[(g1, g, t)]
- for g1 in range(g)
- for t in all_tables
- if connections[g1][g] > 0
- )
- >= min_known_neighbors
+ seats[(t, g)] = model.new_bool_var("guest %i seats on table %i" % (g, t))
+
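+    # colocated[(g1, g2)] is true iff guests g1 and g2 end up at the same table.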
+ colocated = {}
+ for g1 in range(num_guests - 1):
+ for g2 in range(g1 + 1, num_guests):
+ colocated[(g1, g2)] = model.new_bool_var(
+ "guest %i seats with guest %i" % (g1, g2)
+ )
+
+ same_table = {}
+ for g1 in range(num_guests - 1):
+ for g2 in range(g1 + 1, num_guests):
+ for t in all_tables:
+ same_table[(g1, g2, t)] = model.new_bool_var(
+ "guest %i seats with guest %i on table %i" % (g1, g2, t)
+ )
+
+ # Objective
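+    # Maximize the total connection strength over pairs of guests seated at the same table.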
+ model.maximize(
+ sum(
+ connections[g1][g2] * colocated[g1, g2]
+ for g1 in range(num_guests - 1)
+ for g2 in range(g1 + 1, num_guests)
+ if connections[g1][g2] > 0
+ )
+ )
+
+ #
+ # Constraints
+ #
+
+    # Everybody sits at exactly one table.
+ for g in all_guests:
+ model.add(sum(seats[(t, g)] for t in all_tables) == 1)
+
+ # Tables have a max capacity.
+ for t in all_tables:
+ model.add(sum(seats[(t, g)] for g in all_guests) <= table_capacity)
+
+ # Link colocated with seats
+ for g1 in range(num_guests - 1):
+ for g2 in range(g1 + 1, num_guests):
+ for t in all_tables:
+ # Link same_table and seats.
+ model.add_bool_or([
+ ~seats[(t, g1)],
+ ~seats[(t, g2)],
+ same_table[(g1, g2, t)],
+ ])
+ model.add_implication(same_table[(g1, g2, t)], seats[(t, g1)])
+ model.add_implication(same_table[(g1, g2, t)], seats[(t, g2)])
+
+ # Link colocated and same_table.
+ model.add(
+ sum(same_table[(g1, g2, t)] for t in all_tables)
+ == colocated[(g1, g2)]
+ )
+
+ # Min known neighbors rule.
+ for g in all_guests:
+ model.add(
+ sum(
+ same_table[(g, g2, t)]
+ for g2 in range(g + 1, num_guests)
+ for t in all_tables
+ if connections[g][g2] > 0
+ )
+ + sum(
+ same_table[(g1, g, t)]
+ for g1 in range(g)
+ for t in all_tables
+ if connections[g1][g] > 0
)
+ >= min_known_neighbors
+ )
- # Symmetry breaking. First guest seats on the first table.
- model.add(seats[(0, 0)] == 1)
+ # Symmetry breaking. First guest seats on the first table.
+ model.add(seats[(0, 0)] == 1)
- ### Solve model.
- solver = cp_model.CpSolver()
- solution_printer = WeddingChartPrinter(seats, names, num_tables, num_guests)
- solver.solve(model, solution_printer)
+ ### Solve model.
+ solver = cp_model.CpSolver()
+ solution_printer = WeddingChartPrinter(seats, names, num_tables, num_guests)
+ solver.solve(model, solution_printer)
- print("Statistics")
- print(" - conflicts : %i" % solver.num_conflicts)
- print(" - branches : %i" % solver.num_branches)
- print(" - wall time : %f s" % solver.wall_time)
- print(" - num solutions: %i" % solution_printer.num_solutions())
+ print("Statistics")
+ print(" - conflicts : %i" % solver.num_conflicts)
+ print(" - branches : %i" % solver.num_branches)
+ print(" - wall time : %f s" % solver.wall_time)
+ print(" - num solutions: %i" % solution_printer.num_solutions())
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
- solve_with_discrete_model()
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
+ solve_with_discrete_model()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/weighted_latency_problem_sat.py b/examples/python/weighted_latency_problem_sat.py
index 36616bb26b3..892ae443685 100644
--- a/examples/python/weighted_latency_problem_sat.py
+++ b/examples/python/weighted_latency_problem_sat.py
@@ -23,11 +23,15 @@
from ortools.sat.python import cp_model
_NUM_NODES = flags.DEFINE_integer("num_nodes", 12, "Number of nodes to visit.")
-_GRID_SIZE = flags.DEFINE_integer("grid_size", 20, "Size of the grid where nodes are.")
+_GRID_SIZE = flags.DEFINE_integer(
+ "grid_size", 20, "Size of the grid where nodes are."
+)
_PROFIT_RANGE = flags.DEFINE_integer("profit_range", 50, "Range of profit.")
_SEED = flags.DEFINE_integer("seed", 0, "Random seed.")
_PARAMS = flags.DEFINE_string(
- "params", "num_search_workers:16, max_time_in_seconds:5", "Sat solver parameters."
+ "params",
+ "num_search_workers:16, max_time_in_seconds:5",
+ "Sat solver parameters.",
)
_PROTO_FILE = flags.DEFINE_string(
"proto_file", "", "If not empty, output the proto to this file."
@@ -35,80 +39,81 @@
def build_model():
- """Create the nodes and the profit."""
- random.seed(_SEED.value)
- x = []
- y = []
+ """Create the nodes and the profit."""
+ random.seed(_SEED.value)
+ x = []
+ y = []
+ x.append(random.randint(0, _GRID_SIZE.value))
+ y.append(random.randint(0, _GRID_SIZE.value))
+ for _ in range(_NUM_NODES.value):
x.append(random.randint(0, _GRID_SIZE.value))
y.append(random.randint(0, _GRID_SIZE.value))
- for _ in range(_NUM_NODES.value):
- x.append(random.randint(0, _GRID_SIZE.value))
- y.append(random.randint(0, _GRID_SIZE.value))
- profits = []
- profits.append(0)
- for _ in range(_NUM_NODES.value):
- profits.append(random.randint(1, _PROFIT_RANGE.value))
- sum_of_profits = sum(profits)
- profits = [p / sum_of_profits for p in profits]
+ profits = []
+ profits.append(0)
+ for _ in range(_NUM_NODES.value):
+ profits.append(random.randint(1, _PROFIT_RANGE.value))
+ sum_of_profits = sum(profits)
+ profits = [p / sum_of_profits for p in profits]
- return x, y, profits
+ return x, y, profits
def solve_with_cp_sat(x, y, profits) -> None:
- """Solves the problem with the CP-SAT solver."""
- model = cp_model.CpModel()
-
- # because of the manhattan distance, the sum of distances is bounded by this.
- horizon = _GRID_SIZE.value * 2 * _NUM_NODES.value
- times = [
- model.new_int_var(0, horizon, f"x_{i}") for i in range(_NUM_NODES.value + 1)
- ]
-
- # Node 0 is the start node.
- model.add(times[0] == 0)
-
- # Create the circuit constraint.
- arcs = []
- for i in range(_NUM_NODES.value + 1):
- for j in range(_NUM_NODES.value + 1):
- if i == j:
- continue
- # We use a manhattan distance between nodes.
- distance = abs(x[i] - x[j]) + abs(y[i] - y[j])
- lit = model.new_bool_var(f"{i}_to_{j}")
- arcs.append((i, j, lit))
-
- # add transitions between nodes.
- if i == 0:
- # Initial transition
- model.add(times[j] == distance).only_enforce_if(lit)
- elif j != 0:
- # We do not care for the last transition.
- model.add(times[j] == times[i] + distance).only_enforce_if(lit)
- model.add_circuit(arcs)
-
- model.minimize(cp_model.LinearExpr.weighted_sum(times, profits))
-
- if _PROTO_FILE.value:
- model.export_to_file(_PROTO_FILE.value)
-
- # Solve model.
- solver = cp_model.CpSolver()
- if _PARAMS.value:
- text_format.Parse(_PARAMS.value, solver.parameters)
- solver.parameters.log_search_progress = True
- solver.solve(model)
+ """Solves the problem with the CP-SAT solver."""
+ model = cp_model.CpModel()
+
+    # Because of the Manhattan distance, the sum of distances is bounded by this horizon.
+ horizon = _GRID_SIZE.value * 2 * _NUM_NODES.value
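+    # times[i] is the visit time of node i along the route; node 0 is the start.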
+ times = [
+ model.new_int_var(0, horizon, f"x_{i}")
+ for i in range(_NUM_NODES.value + 1)
+ ]
+
+ # Node 0 is the start node.
+ model.add(times[0] == 0)
+
+ # Create the circuit constraint.
+ arcs = []
+ for i in range(_NUM_NODES.value + 1):
+ for j in range(_NUM_NODES.value + 1):
+ if i == j:
+ continue
+            # We use the Manhattan distance between nodes.
+ distance = abs(x[i] - x[j]) + abs(y[i] - y[j])
+ lit = model.new_bool_var(f"{i}_to_{j}")
+ arcs.append((i, j, lit))
+
+ # add transitions between nodes.
+ if i == 0:
+ # Initial transition
+ model.add(times[j] == distance).only_enforce_if(lit)
+ elif j != 0:
+ # We do not care for the last transition.
+ model.add(times[j] == times[i] + distance).only_enforce_if(lit)
+ model.add_circuit(arcs)
+
+ model.minimize(cp_model.LinearExpr.weighted_sum(times, profits))
+
+ if _PROTO_FILE.value:
+ model.export_to_file(_PROTO_FILE.value)
+
+ # Solve model.
+ solver = cp_model.CpSolver()
+ if _PARAMS.value:
+ text_format.Parse(_PARAMS.value, solver.parameters)
+ solver.parameters.log_search_progress = True
+ solver.solve(model)
def main(argv: Sequence[str]) -> None:
- if len(argv) > 1:
- raise app.UsageError("Too many command-line arguments.")
+ if len(argv) > 1:
+ raise app.UsageError("Too many command-line arguments.")
- x, y, profits = build_model()
- solve_with_cp_sat(x, y, profits)
- # TODO(user): Implement routing model.
+ x, y, profits = build_model()
+ solve_with_cp_sat(x, y, profits)
+ # TODO(user): Implement routing model.
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/python/zebra_sat.py b/examples/python/zebra_sat.py
index 204be39f1ac..2bf81d424af 100755
--- a/examples/python/zebra_sat.py
+++ b/examples/python/zebra_sat.py
@@ -39,84 +39,90 @@
# pylint: disable=too-many-statements
def solve_zebra():
- """Solves the zebra problem."""
-
- # Create the model.
- model = cp_model.CpModel()
-
- red = model.new_int_var(1, 5, "red")
- green = model.new_int_var(1, 5, "green")
- yellow = model.new_int_var(1, 5, "yellow")
- blue = model.new_int_var(1, 5, "blue")
- ivory = model.new_int_var(1, 5, "ivory")
-
- englishman = model.new_int_var(1, 5, "englishman")
- spaniard = model.new_int_var(1, 5, "spaniard")
- japanese = model.new_int_var(1, 5, "japanese")
- ukrainian = model.new_int_var(1, 5, "ukrainian")
- norwegian = model.new_int_var(1, 5, "norwegian")
-
- dog = model.new_int_var(1, 5, "dog")
- snails = model.new_int_var(1, 5, "snails")
- fox = model.new_int_var(1, 5, "fox")
- zebra = model.new_int_var(1, 5, "zebra")
- horse = model.new_int_var(1, 5, "horse")
-
- tea = model.new_int_var(1, 5, "tea")
- coffee = model.new_int_var(1, 5, "coffee")
- water = model.new_int_var(1, 5, "water")
- milk = model.new_int_var(1, 5, "milk")
- fruit_juice = model.new_int_var(1, 5, "fruit juice")
-
- old_gold = model.new_int_var(1, 5, "old gold")
- kools = model.new_int_var(1, 5, "kools")
- chesterfields = model.new_int_var(1, 5, "chesterfields")
- lucky_strike = model.new_int_var(1, 5, "lucky strike")
- parliaments = model.new_int_var(1, 5, "parliaments")
-
- model.add_all_different(red, green, yellow, blue, ivory)
- model.add_all_different(englishman, spaniard, japanese, ukrainian, norwegian)
- model.add_all_different(dog, snails, fox, zebra, horse)
- model.add_all_different(tea, coffee, water, milk, fruit_juice)
- model.add_all_different(parliaments, kools, chesterfields, lucky_strike, old_gold)
-
- model.add(englishman == red)
- model.add(spaniard == dog)
- model.add(coffee == green)
- model.add(ukrainian == tea)
- model.add(green == ivory + 1)
- model.add(old_gold == snails)
- model.add(kools == yellow)
- model.add(milk == 3)
- model.add(norwegian == 1)
-
- diff_fox_chesterfields = model.new_int_var(-4, 4, "diff_fox_chesterfields")
- model.add(diff_fox_chesterfields == fox - chesterfields)
- model.add_abs_equality(1, diff_fox_chesterfields)
-
- diff_horse_kools = model.new_int_var(-4, 4, "diff_horse_kools")
- model.add(diff_horse_kools == horse - kools)
- model.add_abs_equality(1, diff_horse_kools)
-
- model.add(lucky_strike == fruit_juice)
- model.add(japanese == parliaments)
-
- diff_norwegian_blue = model.new_int_var(-4, 4, "diff_norwegian_blue")
- model.add(diff_norwegian_blue == norwegian - blue)
- model.add_abs_equality(1, diff_norwegian_blue)
-
- # Solve and print out the solution.
- solver = cp_model.CpSolver()
- status = solver.solve(model)
-
- if status == cp_model.OPTIMAL:
- people = [englishman, spaniard, japanese, ukrainian, norwegian]
- water_drinker = [p for p in people if solver.value(p) == solver.value(water)][0]
- zebra_owner = [p for p in people if solver.value(p) == solver.value(zebra)][0]
- print("The", water_drinker.name, "drinks water.")
- print("The", zebra_owner.name, "owns the zebra.")
- else:
- print("No solutions to the zebra problem, this is unusual!")
+ """Solves the zebra problem."""
+
+ # Create the model.
+ model = cp_model.CpModel()
+
+ red = model.new_int_var(1, 5, "red")
+ green = model.new_int_var(1, 5, "green")
+ yellow = model.new_int_var(1, 5, "yellow")
+ blue = model.new_int_var(1, 5, "blue")
+ ivory = model.new_int_var(1, 5, "ivory")
+
+ englishman = model.new_int_var(1, 5, "englishman")
+ spaniard = model.new_int_var(1, 5, "spaniard")
+ japanese = model.new_int_var(1, 5, "japanese")
+ ukrainian = model.new_int_var(1, 5, "ukrainian")
+ norwegian = model.new_int_var(1, 5, "norwegian")
+
+ dog = model.new_int_var(1, 5, "dog")
+ snails = model.new_int_var(1, 5, "snails")
+ fox = model.new_int_var(1, 5, "fox")
+ zebra = model.new_int_var(1, 5, "zebra")
+ horse = model.new_int_var(1, 5, "horse")
+
+ tea = model.new_int_var(1, 5, "tea")
+ coffee = model.new_int_var(1, 5, "coffee")
+ water = model.new_int_var(1, 5, "water")
+ milk = model.new_int_var(1, 5, "milk")
+ fruit_juice = model.new_int_var(1, 5, "fruit juice")
+
+ old_gold = model.new_int_var(1, 5, "old gold")
+ kools = model.new_int_var(1, 5, "kools")
+ chesterfields = model.new_int_var(1, 5, "chesterfields")
+ lucky_strike = model.new_int_var(1, 5, "lucky strike")
+ parliaments = model.new_int_var(1, 5, "parliaments")
+
+ model.add_all_different(red, green, yellow, blue, ivory)
+ model.add_all_different(englishman, spaniard, japanese, ukrainian, norwegian)
+ model.add_all_different(dog, snails, fox, zebra, horse)
+ model.add_all_different(tea, coffee, water, milk, fruit_juice)
+ model.add_all_different(
+ parliaments, kools, chesterfields, lucky_strike, old_gold
+ )
+
+ model.add(englishman == red)
+ model.add(spaniard == dog)
+ model.add(coffee == green)
+ model.add(ukrainian == tea)
+ model.add(green == ivory + 1)
+ model.add(old_gold == snails)
+ model.add(kools == yellow)
+ model.add(milk == 3)
+ model.add(norwegian == 1)
+
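+    # "Next to" clues are encoded as |a - b| == 1 via an auxiliary difference variable.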
+ diff_fox_chesterfields = model.new_int_var(-4, 4, "diff_fox_chesterfields")
+ model.add(diff_fox_chesterfields == fox - chesterfields)
+ model.add_abs_equality(1, diff_fox_chesterfields)
+
+ diff_horse_kools = model.new_int_var(-4, 4, "diff_horse_kools")
+ model.add(diff_horse_kools == horse - kools)
+ model.add_abs_equality(1, diff_horse_kools)
+
+ model.add(lucky_strike == fruit_juice)
+ model.add(japanese == parliaments)
+
+ diff_norwegian_blue = model.new_int_var(-4, 4, "diff_norwegian_blue")
+ model.add(diff_norwegian_blue == norwegian - blue)
+ model.add_abs_equality(1, diff_norwegian_blue)
+
+ # Solve and print out the solution.
+ solver = cp_model.CpSolver()
+ status = solver.solve(model)
+
+ if status == cp_model.OPTIMAL:
+ people = [englishman, spaniard, japanese, ukrainian, norwegian]
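+        # Each nationality variable holds a house number; match it against the drink and pet positions.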
+ water_drinker = [
+ p for p in people if solver.value(p) == solver.value(water)
+ ][0]
+        zebra_owner = [
+            p for p in people if solver.value(p) == solver.value(zebra)
+        ][0]
+ print("The", water_drinker.name, "drinks water.")
+ print("The", zebra_owner.name, "owns the zebra.")
+ else:
+ print("No solutions to the zebra problem, this is unusual!")
solve_zebra()
diff --git a/examples/service/solve_math_opt_model_via_http.py b/examples/service/solve_math_opt_model_via_http.py
index 2142906218e..59bbccd1b6f 100644
--- a/examples/service/solve_math_opt_model_via_http.py
+++ b/examples/service/solve_math_opt_model_via_http.py
@@ -30,42 +30,42 @@
def request_example() -> None:
- """Run example using MathOpt `remote_http_solve` function."""
- # Set up the API key.
- api_key = _API_KEY.value
- if not api_key:
- print(
- "API key is required. See"
- " https://developers.google.com/optimization/service/setup for"
- " instructions."
- )
- return
+ """Run example using MathOpt `remote_http_solve` function."""
+ # Set up the API key.
+ api_key = _API_KEY.value
+ if not api_key:
+ print(
+ "API key is required. See"
+ " https://developers.google.com/optimization/service/setup for"
+ " instructions."
+ )
+ return
- # Build a MathOpt model
- model = mathopt.Model(name="my_model")
- x = model.add_binary_variable(name="x")
- y = model.add_variable(lb=0.0, ub=2.5, name="y")
- model.add_linear_constraint(x + y <= 1.5, name="c")
- model.maximize(2 * x + y)
- try:
- result, logs = remote_http_solve.remote_http_solve(
- model,
- mathopt.SolverType.GSCIP,
- mathopt.SolveParameters(enable_output=True),
- api_key=api_key,
- )
- print("Objective value: ", result.objective_value())
- print("x: ", result.variable_values(x))
- print("y: ", result.variable_values(y))
- print("\n".join(logs))
- except remote_http_solve.OptimizationServiceError as err:
- print(err)
+ # Build a MathOpt model
+ model = mathopt.Model(name="my_model")
+ x = model.add_binary_variable(name="x")
+ y = model.add_variable(lb=0.0, ub=2.5, name="y")
+ model.add_linear_constraint(x + y <= 1.5, name="c")
+ model.maximize(2 * x + y)
+ try:
+ result, logs = remote_http_solve.remote_http_solve(
+ model,
+ mathopt.SolverType.GSCIP,
+ mathopt.SolveParameters(enable_output=True),
+ api_key=api_key,
+ )
+ print("Objective value: ", result.objective_value())
+ print("x: ", result.variable_values(x))
+ print("y: ", result.variable_values(y))
+ print("\n".join(logs))
+ except remote_http_solve.OptimizationServiceError as err:
+ print(err)
def main(argv: Sequence[str]) -> None:
- del argv # Unused.
- request_example()
+ del argv # Unused.
+ request_example()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/examples/tests/dual_loading.py b/examples/tests/dual_loading.py
index 8ab1c1c88ea..3ff44591fde 100755
--- a/examples/tests/dual_loading.py
+++ b/examples/tests/dual_loading.py
@@ -5,7 +5,7 @@
def main():
cp = pywrapcp.Solver("test")
- lp = pywraplp.Solver.CreateSolver('GLOP')
+ lp = pywraplp.Solver.CreateSolver("GLOP")
if __name__ == "__main__":
diff --git a/examples/tests/issue117.py b/examples/tests/issue117.py
index 68ec9d59a48..4801077c023 100755
--- a/examples/tests/issue117.py
+++ b/examples/tests/issue117.py
@@ -1,10 +1,10 @@
#!/usr/bin/env python3
from collections import namedtuple
-from ortools.constraint_solver import pywrapcp
+from ortools.routing import pywraprouting
VEHICLE_COUNT = 30
VEHICLE_CAPACITY = 200
-Customer = namedtuple("Customer", ['index', 'demand', 'x', 'y'])
+Customer = namedtuple('Customer', ['index', 'demand', 'x', 'y'])
print('Init')
@@ -14,21 +14,22 @@
customers.append(Customer(1, 1, 2.0, 2.0))
customer_count = len(customers)
-manager = pywrapcp.RoutingIndexManager(3, VEHICLE_COUNT, 0)
-routing = pywrapcp.RoutingModel(manager)
+manager = pywraprouting.RoutingIndexManager(3, VEHICLE_COUNT, 0)
+routing = pywraprouting.RoutingModel(manager)
print('Demand Constraint')
demands = []
for i in range(0, customer_count):
- demands.append(customers[i][1])
-routing.AddVectorDimension(demands, VEHICLE_CAPACITY, True, "Demand")
+ demands.append(customers[i][1])
+routing.AddVectorDimension(demands, VEHICLE_CAPACITY, True, 'Demand')
print('Adding Costs')
def distance_callback(from_index, to_index):
- #static just for the sake of the example
- return 1
+    # Static value, just for the sake of the example.
+ return 1
+
transit_callback_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
@@ -41,25 +42,25 @@ def distance_callback(from_index, to_index):
routes = []
for i in range(0, routing.vehicles()):
- route_number = i
- routes.append([])
- node = routing.Start(route_number)
- route = []
- route.append(0)
- if routing.IsVehicleUsed(assignment, i):
- while True:
- node = assignment.Value(routing.NextVar(node))
+ route_number = i
+ routes.append([])
+ node = routing.Start(route_number)
+ route = []
+ route.append(0)
+ if routing.IsVehicleUsed(assignment, i):
+ while True:
+ node = assignment.Value(routing.NextVar(node))
- if not routing.IsEnd(node):
- route.append(int(node))
- else:
- break
+ if not routing.IsEnd(node):
+ route.append(int(node))
+ else:
+ break
- route.append(0)
- routes[route_number].append(route)
+ route.append(0)
+ routes[route_number].append(route)
-#This are the routes as list of lists
+# These are the routes as a list of lists.
routes = [el[0] for el in routes]
-#Now try to read the routes into a new assigment object fails
+# Now trying to read the routes into a new assignment object fails.
assignment2 = routing.ReadAssignmentFromRoutes(routes, True)
diff --git a/examples/tests/issue1231.py b/examples/tests/issue1231.py
index e78a2dc933a..bcf8353a862 100755
--- a/examples/tests/issue1231.py
+++ b/examples/tests/issue1231.py
@@ -24,9 +24,10 @@
from ortools.constraint_solver import pywrapcp
from os import abort
+
def CPIsFun():
# Constraint programming engine
- solver = pywrapcp.Solver('CP is fun!');
+ solver = pywrapcp.Solver('CP is fun!')
kBase = 10
@@ -34,16 +35,16 @@ def CPIsFun():
digits = list(range(0, kBase))
digits_without_zero = list(range(1, kBase))
- c = solver.IntVar(digits_without_zero, 'C');
- p = solver.IntVar(digits, 'P');
- i = solver.IntVar(digits_without_zero, 'I');
- s = solver.IntVar(digits, 'S');
- f = solver.IntVar(digits_without_zero, 'F');
- u = solver.IntVar(digits, 'U');
- n = solver.IntVar(digits, 'N');
- t = solver.IntVar(digits_without_zero, 'T');
- r = solver.IntVar(digits, 'R');
- e = solver.IntVar(digits, 'E');
+ c = solver.IntVar(digits_without_zero, 'C')
+ p = solver.IntVar(digits, 'P')
+ i = solver.IntVar(digits_without_zero, 'I')
+ s = solver.IntVar(digits, 'S')
+ f = solver.IntVar(digits_without_zero, 'F')
+ u = solver.IntVar(digits, 'U')
+ n = solver.IntVar(digits, 'N')
+ t = solver.IntVar(digits_without_zero, 'T')
+ r = solver.IntVar(digits, 'R')
+ e = solver.IntVar(digits, 'E')
# We need to group variables in a list to use the constraint AllDifferent.
letters = [c, p, i, s, f, u, n, t, r, e]
@@ -55,20 +56,30 @@ def CPIsFun():
solver.Add(solver.AllDifferent(letters))
# CP + IS + FUN = TRUE
- solver.Add (p + s + n + kBase * (c + i + u) + kBase * kBase * f ==
- e + kBase * u + kBase * kBase * r + kBase * kBase * kBase * t)
+ solver.Add(
+ p + s + n + kBase * (c + i + u) + kBase * kBase * f
+ == e + kBase * u + kBase * kBase * r + kBase * kBase * kBase * t
+ )
- db = solver.Phase(letters, solver.INT_VAR_DEFAULT,
- solver.INT_VALUE_DEFAULT)
+ db = solver.Phase(letters, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
solver.NewSearch(db)
while solver.NextSolution():
print(letters)
# Is CP + IS + FUN = TRUE?
- assert (kBase*c.Value() + p.Value() + kBase*i.Value() + s.Value() +
- kBase*kBase*f.Value() + kBase*u.Value() + n.Value() ==
- kBase*kBase*kBase*t.Value() + kBase*kBase*r.Value() +
- kBase*u.Value() + e.Value())
+ assert (
+ kBase * c.Value()
+ + p.Value()
+ + kBase * i.Value()
+ + s.Value()
+ + kBase * kBase * f.Value()
+ + kBase * u.Value()
+ + n.Value()
+ == kBase * kBase * kBase * t.Value()
+ + kBase * kBase * r.Value()
+ + kBase * u.Value()
+ + e.Value()
+ )
solver.EndSearch()
diff --git a/examples/tests/issue128.py b/examples/tests/issue128.py
index 71286d5cf46..87435e0bc7e 100755
--- a/examples/tests/issue128.py
+++ b/examples/tests/issue128.py
@@ -1,9 +1,10 @@
#!/usr/bin/env python3
from ortools.constraint_solver import pywrapcp
+
def test_v0():
- print('test_v0')
- solver = pywrapcp.Solver('')
+ print("test_v0")
+ solver = pywrapcp.Solver("")
# we have two tasks of durations 4 and 7
task1 = solver.FixedDurationIntervalVar(0, 5, 4, False, "task1")
@@ -11,8 +12,12 @@ def test_v0():
tasks = [task1, task2]
# to each task, a post task of duration 64 is attached
- postTask1 = solver.FixedDurationIntervalVar(4, 74 + 64, 64, False, "postTask1")
- postTask2 = solver.FixedDurationIntervalVar(4, 77 + 64, 64, False, "postTask2")
+ postTask1 = solver.FixedDurationIntervalVar(
+ 4, 74 + 64, 64, False, "postTask1"
+ )
+ postTask2 = solver.FixedDurationIntervalVar(
+ 4, 77 + 64, 64, False, "postTask2"
+ )
postTasks = [postTask1, postTask2]
solver.Add(postTask1.StartsAtEnd(task1))
@@ -25,24 +30,38 @@ def test_v0():
postTask2UsesRes1 = solver.IntVar(0, 1, "post task 2 using resource 1")
postTask2UsesRes2 = solver.IntVar(0, 1, "post task 2 using resource 2")
- indicators = [postTask1UsesRes1, postTask1UsesRes2, postTask2UsesRes1, postTask2UsesRes2]
+ indicators = [
+ postTask1UsesRes1,
+ postTask1UsesRes2,
+ postTask2UsesRes1,
+ postTask2UsesRes2,
+ ]
# each post task needs exactly one resource
solver.Add(postTask1UsesRes1 + postTask1UsesRes2 == 1)
solver.Add(postTask2UsesRes1 + postTask2UsesRes2 == 1)
# each resource cannot be used simultaneously by more than one post task
- solver.Add(solver.Cumulative(postTasks, [postTask1UsesRes1, postTask2UsesRes1], 1, "cumul1"))
- solver.Add(solver.Cumulative(postTasks, [postTask1UsesRes2, postTask2UsesRes2], 1, "cumul2"))
+ solver.Add(
+ solver.Cumulative(
+ postTasks, [postTask1UsesRes1, postTask2UsesRes1], 1, "cumul1"
+ )
+ )
+ solver.Add(
+ solver.Cumulative(
+ postTasks, [postTask1UsesRes2, postTask2UsesRes2], 1, "cumul2"
+ )
+ )
# using constant demands instead, the correct solution is found
# solver.Add(solver.Cumulative(postTasks, [0, 1], 1, ""))
# solver.Add(solver.Cumulative(postTasks, [1, 0], 1, ""))
-
# search setup and solving
dbInterval = solver.Phase(tasks + postTasks, solver.INTERVAL_DEFAULT)
- dbInt = solver.Phase(indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
+ dbInt = solver.Phase(
+ indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT
+ )
makespan = solver.Max([task1.EndExpr().Var(), task2.EndExpr().Var()])
optimize = solver.Optimize(False, makespan, 1)
@@ -56,20 +75,22 @@ def test_v0():
if collector.SolutionCount() > 0:
for i, task in enumerate(tasks):
- print("task {} runs from {} to {}".format(
- i,
- collector.StartValue(0, task),
- collector.EndValue(0, task)))
+ print(
+ "task {} runs from {} to {}".format(
+ i, collector.StartValue(0, task), collector.EndValue(0, task)
+ )
+ )
for i, task in enumerate(postTasks):
print("postTask {} starts at {}".format(i, collector.StartValue(0, task)))
for indicator in indicators:
- print('{} -> {}'.format(indicator.Name(), collector.Value(0, indicator)))
+ print("{} -> {}".format(indicator.Name(), collector.Value(0, indicator)))
else:
- print('No solution')
+ print("No solution")
+
def test_v1():
- print('test_v1')
- solver = pywrapcp.Solver('')
+ print("test_v1")
+ solver = pywrapcp.Solver("")
# we have two tasks of durations 4 and 7
task1 = solver.FixedDurationIntervalVar(0, 5, 4, False, "task1")
@@ -83,7 +104,6 @@ def test_v1():
task2_r2 = solver.FixedDurationIntervalVar(0, 5, 7, True, "task2_2")
tasks_r2 = [task1_r2, task2_r2]
-
# to each task, a post task of duration 64 is attached
postTask1 = solver.FixedDurationStartSyncedOnEndIntervalVar(task1, 64, 0)
postTask2 = solver.FixedDurationStartSyncedOnEndIntervalVar(task2, 64, 0)
@@ -95,14 +115,28 @@ def test_v1():
postTask1_r2 = solver.FixedDurationIntervalVar(4, 9, 64, True, "pTask1_2")
postTask2_r2 = solver.FixedDurationIntervalVar(4, 11, 64, True, "pTask2_2")
- copies = [ task1_r1, task2_r1, task1_r2, task2_r2,
- postTask1_r1, postTask1_r2, postTask2_r1, postTask2_r2 ]
+ copies = [
+ task1_r1,
+ task2_r1,
+ task1_r2,
+ task2_r2,
+ postTask1_r1,
+ postTask1_r2,
+ postTask2_r1,
+ postTask2_r2,
+ ]
# each resource cannot be used simultaneously by more than one post task
- solver.Add(solver.DisjunctiveConstraint(
- [task1_r1, task2_r1, postTask1_r1, postTask2_r1], "disj1"))
- solver.Add(solver.DisjunctiveConstraint(
- [task1_r2, task2_r2, postTask1_r2, postTask2_r2], "disj1"))
+ solver.Add(
+ solver.DisjunctiveConstraint(
+ [task1_r1, task2_r1, postTask1_r1, postTask2_r1], "disj1"
+ )
+ )
+ solver.Add(
+ solver.DisjunctiveConstraint(
+ [task1_r2, task2_r2, postTask1_r2, postTask2_r2], "disj1"
+ )
+ )
# Only one resource available
solver.Add(task1_r1.PerformedExpr() + task1_r2.PerformedExpr() == 1)
@@ -118,13 +152,17 @@ def test_v1():
# Indicators (no need to add both as they are constrained together)
indicators = [
- task1_r1.PerformedExpr(), task2_r1.PerformedExpr(),
- postTask1_r1.PerformedExpr(), postTask2_r1.PerformedExpr()]
+ task1_r1.PerformedExpr(),
+ task2_r1.PerformedExpr(),
+ postTask1_r1.PerformedExpr(),
+ postTask2_r1.PerformedExpr(),
+ ]
# search setup and solving
dbInterval = solver.Phase(tasks + postTasks, solver.INTERVAL_DEFAULT)
dbInt = solver.Phase(
- indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
+ indicators, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT
+ )
makespan = solver.Max([task1.EndExpr(), task2.EndExpr()])
optimize = solver.Minimize(makespan, 1)
@@ -139,19 +177,26 @@ def test_v1():
solver.Solve(phase, [collector, optimize])
if collector.SolutionCount() > 0:
- print('solution with makespan', collector.ObjectiveValue(0))
+ print("solution with makespan", collector.ObjectiveValue(0))
for task in tasks:
- print("task {} runs from {} to {}".format(
- task.Name(),
- collector.StartValue(0, task),
- collector.EndValue(0, task)))
+ print(
+ "task {} runs from {} to {}".format(
+ task.Name(),
+ collector.StartValue(0, task),
+ collector.EndValue(0, task),
+ )
+ )
for task in postTasks:
- print("postTask {} starts at {}".format(
- task.Name(), collector.StartValue(0, task)))
+ print(
+ "postTask {} starts at {}".format(
+ task.Name(), collector.StartValue(0, task)
+ )
+ )
for task in copies:
print(task.Name(), collector.PerformedValue(0, task))
else:
- print('No solution')
+ print("No solution")
+
test_v0()
test_v1()
diff --git a/examples/tests/issue2.py b/examples/tests/issue2.py
index c6d36b1da22..9233d9584bc 100755
--- a/examples/tests/issue2.py
+++ b/examples/tests/issue2.py
@@ -3,6 +3,7 @@
# Control-C test. Hit Control-C during execution of this program.
+
def main():
solver = pywrapcp.Solver("time limit test")
n = 10
@@ -12,17 +13,15 @@ def main():
solution = solver.Assignment()
solution.Add(x)
- db = solver.Phase(x,
- solver.CHOOSE_FIRST_UNBOUND,
- solver.ASSIGN_MIN_VALUE)
+ db = solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
time_limit = 2000
branch_limit = 100000000
failures_limit = 100000000
solutions_limit = 10000000
- limits = (
- solver.Limit(
- time_limit, branch_limit, failures_limit, solutions_limit, True))
+ limits = solver.Limit(
+ time_limit, branch_limit, failures_limit, solutions_limit, True
+ )
search_log = solver.SearchLog(1000)
assignment = solver.Assignment()
diff --git a/examples/tests/issue22.cs b/examples/tests/issue22.cs
index a1cffd7fb30..f928d8db075 100644
--- a/examples/tests/issue22.cs
+++ b/examples/tests/issue22.cs
@@ -17,6 +17,7 @@
using System.IO;
using System.Text.RegularExpressions;
using Google.OrTools.ConstraintSolver;
+using Google.OrTools.Routing;
public class Issue22Test
{
diff --git a/examples/tests/issue3.py b/examples/tests/issue3.py
index c74b1853dbc..9b8a865b696 100755
--- a/examples/tests/issue3.py
+++ b/examples/tests/issue3.py
@@ -18,7 +18,7 @@
from time import time
from random import randint
-#----------------helper for binpacking posting----------------
+# ----------------helper for binpacking posting----------------
def binpacking(cp, binvars, weights, loadvars):
@@ -33,36 +33,81 @@ def binpacking(cp, binvars, weights, loadvars):
cp.Add(solver.Sum([b[i] * weights[i] for i in range(nitems)]) == l[j])
cp.Add(solver.Sum(loadvars) == sum(weights))
-#------------------------------data reading-------------------
+
+# ------------------------------data reading-------------------
maxcapa = 44
weights = [4, 22, 9, 5, 8, 3, 3, 4, 7, 7, 3]
loss = [
- 0, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 1, 0, 2, 1, 0, 0, 0, 0, 2, 1, 0,
- 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 1, 0, 3, 2, 1, 0, 2, 1, 0, 0, 0]
+ 0,
+ 11,
+ 10,
+ 9,
+ 8,
+ 7,
+ 6,
+ 5,
+ 4,
+ 3,
+ 2,
+ 1,
+ 0,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 3,
+ 2,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+]
nbslab = 11
-#------------------solver and variable declaration-------------
+# ------------------solver and variable declaration-------------
solver = pywrapcp.Solver('Steel Mill Slab')
-x = [solver.IntVar(0, nbslab-1, 'x' + str(i)) for i in range(nbslab)]
+x = [solver.IntVar(0, nbslab - 1, 'x' + str(i)) for i in range(nbslab)]
l = [solver.IntVar(0, maxcapa, 'l' + str(i)) for i in range(nbslab)]
obj = solver.IntVar(0, nbslab * maxcapa, 'obj')
-#-------------------post of the constraints--------------
+# -------------------post of the constraints--------------
binpacking(solver, x, weights[:nbslab], l)
-solver.Add(solver.Sum([solver.Element(loss, l[s])
- for s in range(nbslab)]) == obj)
+solver.Add(
+ solver.Sum([solver.Element(loss, l[s]) for s in range(nbslab)]) == obj
+)
sol = [2, 0, 0, 0, 0, 1, 2, 2, 1, 1, 2]
-#------------start the search and optimization-----------
+# ------------start the search and optimization-----------
objective = solver.Minimize(obj, 1)
-db = solver.Phase(x, solver.INT_VAR_DEFAULT,
- solver.INT_VALUE_DEFAULT)
+db = solver.Phase(x, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
# solver.NewSearch(db,[objective]) #segfault if I comment this
while solver.NextSolution():
diff --git a/examples/tests/issue4.py b/examples/tests/issue4.py
index 870328ad272..77f1d340b17 100755
--- a/examples/tests/issue4.py
+++ b/examples/tests/issue4.py
@@ -11,17 +11,15 @@ def main():
solution = solver.Assignment()
solution.Add(x)
- db = solver.Phase(x,
- solver.CHOOSE_FIRST_UNBOUND,
- solver.ASSIGN_MIN_VALUE)
+ db = solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
time_limit = 2000
branch_limit = 100000000
failures_limit = 100000000
solutions_limit = 10000000
- limits = (
- solver.Limit(
- time_limit, branch_limit, failures_limit, solutions_limit, True))
+ limits = solver.Limit(
+ time_limit, branch_limit, failures_limit, solutions_limit, True
+ )
search_log = solver.SearchLog(1000)
diff --git a/examples/tests/issue46.py b/examples/tests/issue46.py
index 338cb72f373..a9adbc0ceb3 100755
--- a/examples/tests/issue46.py
+++ b/examples/tests/issue46.py
@@ -17,78 +17,81 @@
class AssignToStartMin(pywrapcp.PyDecisionBuilder):
- def __init__(self, intervals):
- pywrapcp.PyDecisionBuilder.__init__(self)
- self.__intervals = intervals
- def Next(self, solver):
- for interval in self.__intervals:
- interval.SetStartMax(interval.StartMin())
- return None
+ def __init__(self, intervals):
+ pywrapcp.PyDecisionBuilder.__init__(self)
+ self.__intervals = intervals
- def DebugString(self):
- return 'CustomDecisionBuilder'
+ def Next(self, solver):
+ for interval in self.__intervals:
+ interval.SetStartMax(interval.StartMin())
+ return None
+
+ def DebugString(self):
+ return 'CustomDecisionBuilder'
def NoSequence():
- print('NoSequence')
- solver = pywrapcp.Solver('Ordo')
- tasks = []
- [
- tasks.append(
- solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i))
- for i in range(3)
- ]
- print(tasks)
- disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
- solver.Add(disj)
- collector = solver.AllSolutionCollector()
- collector.Add(tasks)
- intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
- solver.Solve(intervalPhase, [collector])
- print(collector.SolutionCount())
- for i in range(collector.SolutionCount()):
- print("Solution ", i)
- print(collector.ObjectiveValue(i))
- print([collector.StartValue(i, tasks[j]) for j in range(3)])
- print([collector.EndValue(i, tasks[j]) for j in range(3)])
+ print('NoSequence')
+ solver = pywrapcp.Solver('Ordo')
+ tasks = []
+ [
+ tasks.append(
+ solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i)
+ )
+ for i in range(3)
+ ]
+ print(tasks)
+ disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
+ solver.Add(disj)
+ collector = solver.AllSolutionCollector()
+ collector.Add(tasks)
+ intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
+ solver.Solve(intervalPhase, [collector])
+ print(collector.SolutionCount())
+ for i in range(collector.SolutionCount()):
+ print('Solution ', i)
+ print(collector.ObjectiveValue(i))
+ print([collector.StartValue(i, tasks[j]) for j in range(3)])
+ print([collector.EndValue(i, tasks[j]) for j in range(3)])
def Sequence():
- print('Sequence')
- solver = pywrapcp.Solver('Ordo')
- tasks = []
- [
- tasks.append(
- solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i))
- for i in range(3)
- ]
- print(tasks)
- disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
- solver.Add(disj)
- sequence = []
- sequence.append(disj.SequenceVar())
- sequence[0].RankFirst(0)
- collector = solver.AllSolutionCollector()
- collector.Add(sequence)
- collector.Add(tasks)
- sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
- intervalPhase = AssignToStartMin(tasks)
- # intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
- mainPhase = solver.Compose([sequencePhase, intervalPhase])
- solver.Solve(mainPhase, [collector])
- print(collector.SolutionCount())
- for i in range(collector.SolutionCount()):
- print("Solution ", i)
- print(collector.ObjectiveValue(i))
- print([collector.StartValue(i, tasks[j]) for j in range(3)])
- print([collector.EndValue(i, tasks[j]) for j in range(3)])
+ print('Sequence')
+ solver = pywrapcp.Solver('Ordo')
+ tasks = []
+ [
+ tasks.append(
+ solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i)
+ )
+ for i in range(3)
+ ]
+ print(tasks)
+ disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
+ solver.Add(disj)
+ sequence = []
+ sequence.append(disj.SequenceVar())
+ sequence[0].RankFirst(0)
+ collector = solver.AllSolutionCollector()
+ collector.Add(sequence)
+ collector.Add(tasks)
+ sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
+ intervalPhase = AssignToStartMin(tasks)
+ # intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
+ mainPhase = solver.Compose([sequencePhase, intervalPhase])
+ solver.Solve(mainPhase, [collector])
+ print(collector.SolutionCount())
+ for i in range(collector.SolutionCount()):
+ print('Solution ', i)
+ print(collector.ObjectiveValue(i))
+ print([collector.StartValue(i, tasks[j]) for j in range(3)])
+ print([collector.EndValue(i, tasks[j]) for j in range(3)])
def main():
- NoSequence()
- Sequence()
+ NoSequence()
+ Sequence()
if __name__ == '__main__':
- main()
+ main()
diff --git a/examples/tests/issue5.py b/examples/tests/issue5.py
index d12a3e97c3c..082703f8d15 100755
--- a/examples/tests/issue5.py
+++ b/examples/tests/issue5.py
@@ -13,51 +13,51 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-'''
- A programming puzzle from Einav in Google CP Solver.
-
- From
- 'A programming puzzle from Einav'
- http://gcanyon.wordpress.com/2009/10/28/a-programming-puzzle-from-einav/
-
- My friend Einav gave me this programming puzzle to work on. Given
- this array of positive and negative numbers:
- 33 30 -10 -6 18 7 -11 -23 6
- ...
- -25 4 16 30 33 -23 -4 4 -23
-
- You can flip the sign of entire rows and columns, as many of them
- as you like. The goal is to make all the rows and columns sum to positive
- numbers (or zero), and then to find the solution (there are more than one)
- that has the smallest overall sum. So for example, for this array:
- 33 30 -10
- -16 19 9
- -17 -12 -14
- You could flip the sign for the bottom row to get this array:
- 33 30 -10
- -16 19 9
- 17 12 14
- Now all the rows and columns have positive sums, and the overall total is
- 108.
- But you could instead flip the second and third columns, and the second
- row, to get this array:
- 33 -30 10
- 16 19 9
- -17 12 14
- All the rows and columns still total positive, and the overall sum is just
- 66. So this solution is better (I don't know if it's the best)
- A pure brute force solution would have to try over 30 billion solutions.
- I wrote code to solve this in J. I'll post that separately.
-
- Compare with the following models:
- * MiniZinc http://www.hakank.org/minizinc/einav_puzzle.mzn
- * SICStus: http://hakank.org/sicstus/einav_puzzle.pl
-
-
- This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
- Also see my other Google CP Solver models:
- http://www.hakank.org/google_or_tools/
-'''
+"""
+A programming puzzle from Einav in Google CP Solver.
+
+From
+'A programming puzzle from Einav'
+http://gcanyon.wordpress.com/2009/10/28/a-programming-puzzle-from-einav/
+
+My friend Einav gave me this programming puzzle to work on. Given
+this array of positive and negative numbers:
+33 30 -10 -6 18 7 -11 -23 6
+...
+-25 4 16 30 33 -23 -4 4 -23
+
+You can flip the sign of entire rows and columns, as many of them
+as you like. The goal is to make all the rows and columns sum to positive
+numbers (or zero), and then to find the solution (there are more than one)
+that has the smallest overall sum. So for example, for this array:
+33 30 -10
+-16 19 9
+-17 -12 -14
+You could flip the sign for the bottom row to get this array:
+33 30 -10
+-16 19 9
+17 12 14
+Now all the rows and columns have positive sums, and the overall total is
+108.
+But you could instead flip the second and third columns, and the second
+row, to get this array:
+33 -30 10
+16 19 9
+-17 12 14
+All the rows and columns still total positive, and the overall sum is just
+66. So this solution is better (I don't know if it's the best)
+A pure brute force solution would have to try over 30 billion solutions.
+I wrote code to solve this in J. I'll post that separately.
+
+Compare with the following models:
+* MiniZinc http://www.hakank.org/minizinc/einav_puzzle.mzn
+* SICStus: http://hakank.org/sicstus/einav_puzzle.pl
+
+
+This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
+Also see my other Google CP Solver models:
+http://www.hakank.org/google_or_tools/
+"""
from ortools.constraint_solver import pywrapcp
@@ -71,39 +71,41 @@ def main():
#
# small problem
-# data = [
-# [ 33, 30, -10],
-# [-16, 19, 9],
-# [-17, -12, -14]
-# ]
-
- data = [[33, 30, 10, -6, 18, -7, -11, 23, -6],
- [16, -19, 9, -26, -8, -19, -8, -21, -14],
- [17, 12, -14, 31, -30, 13, -13, 19, 16],
- [-6, -11, 1, 17, -12, -4, -7, 14, -21],
- [18, -31, 34, -22, 17, -19, 20, 24, 6],
- [33, -18, 17, -15, 31, -5, 3, 27, -3],
- [-18, -20, -18, 31, 6, 4, -2, -12, 24],
- [27, 14, 4, -29, -3, 5, -29, 8, -12],
- [-15, -7, -23, 23, -9, -8, 6, 8, -12],
- [33, -23, -19, -4, -8, -7, 11, -12, 31],
- [-20, 19, -15, -30, 11, 32, 7, 14, -5],
- [-23, 18, -32, -2, -31, -7, 8, 24, 16],
- [32, -4, -10, -14, -6, -1, 0, 23, 23],
- [25, 0, -23, 22, 12, 28, -27, 15, 4],
- [-30, -13, -16, -3, -3, -32, -3, 27, -31],
- [22, 1, 26, 4, -2, -13, 26, 17, 14],
- [-9, -18, 3, -20, -27, -32, -11, 27, 13],
- [-17, 33, -7, 19, -32, 13, -31, -2, -24],
- [-31, 27, -31, -29, 15, 2, 29, -15, 33],
- [-18, -23, 15, 28, 0, 30, -4, 12, -32],
- [-3, 34, 27, -25, -18, 26, 1, 34, 26],
- [-21, -31, -10, -13, -30, -17, -12, -26, 31],
- [23, -31, -19, 21, -17, -10, 2, -23, 23],
- [-3, 6, 0, -3, -32, 0, -10, -25, 14],
- [-19, 9, 14, -27, 20, 15, -5, -27, 18],
- [11, -6, 24, 7, -17, 26, 20, -31, -25],
- [-25, 4, -16, 30, 33, 23, -4, -4, 23]]
+ # data = [
+ # [ 33, 30, -10],
+ # [-16, 19, 9],
+ # [-17, -12, -14]
+ # ]
+
+ data = [
+ [33, 30, 10, -6, 18, -7, -11, 23, -6],
+ [16, -19, 9, -26, -8, -19, -8, -21, -14],
+ [17, 12, -14, 31, -30, 13, -13, 19, 16],
+ [-6, -11, 1, 17, -12, -4, -7, 14, -21],
+ [18, -31, 34, -22, 17, -19, 20, 24, 6],
+ [33, -18, 17, -15, 31, -5, 3, 27, -3],
+ [-18, -20, -18, 31, 6, 4, -2, -12, 24],
+ [27, 14, 4, -29, -3, 5, -29, 8, -12],
+ [-15, -7, -23, 23, -9, -8, 6, 8, -12],
+ [33, -23, -19, -4, -8, -7, 11, -12, 31],
+ [-20, 19, -15, -30, 11, 32, 7, 14, -5],
+ [-23, 18, -32, -2, -31, -7, 8, 24, 16],
+ [32, -4, -10, -14, -6, -1, 0, 23, 23],
+ [25, 0, -23, 22, 12, 28, -27, 15, 4],
+ [-30, -13, -16, -3, -3, -32, -3, 27, -31],
+ [22, 1, 26, 4, -2, -13, 26, 17, 14],
+ [-9, -18, 3, -20, -27, -32, -11, 27, 13],
+ [-17, 33, -7, 19, -32, 13, -31, -2, -24],
+ [-31, 27, -31, -29, 15, 2, 29, -15, 33],
+ [-18, -23, 15, 28, 0, 30, -4, 12, -32],
+ [-3, 34, 27, -25, -18, 26, 1, 34, 26],
+ [-21, -31, -10, -13, -30, -17, -12, -26, 31],
+ [23, -31, -19, 21, -17, -10, 2, -23, 23],
+ [-3, 6, 0, -3, -32, 0, -10, -25, 14],
+ [-19, 9, 14, -27, 20, 15, -5, -27, 18],
+ [11, -6, 24, 7, -17, 26, 20, -31, -25],
+ [-25, 4, -16, 30, 33, 23, -4, -4, 23],
+ ]
rows = len(data)
cols = len(data[0])
@@ -116,10 +118,8 @@ def main():
for j in range(cols):
x[i, j] = solver.IntVar(-100, 100, 'x[%i,%i]' % (i, j))
- row_signs = [solver.IntVar([-1, 1], 'row_signs(%i)' % i)
- for i in range(rows)]
- col_signs = [solver.IntVar([-1, 1], 'col_signs(%i)' % j)
- for j in range(cols)]
+ row_signs = [solver.IntVar([-1, 1], 'row_signs(%i)' % i) for i in range(rows)]
+ col_signs = [solver.IntVar([-1, 1], 'col_signs(%i)' % j) for j in range(cols)]
#
# constraints
@@ -131,15 +131,17 @@ def main():
total_sum = solver.Sum([x[i, j] for i in range(rows) for j in range(cols)])
# row sums
- row_sums = [solver.Sum([x[i, j] for j in range(cols)]).Var()
- for i in range(rows)]
+ row_sums = [
+ solver.Sum([x[i, j] for j in range(cols)]).Var() for i in range(rows)
+ ]
# >= 0
for i in range(rows):
row_sums[i].SetMin(0)
# column sums
- col_sums = [solver.Sum([x[i, j] for i in range(rows)]).Var()
- for j in range(cols)]
+ col_sums = [
+ solver.Sum([x[i, j] for i in range(rows)]).Var() for j in range(cols)
+ ]
for j in range(cols):
col_sums[j].SetMin(0)
@@ -149,9 +151,11 @@ def main():
#
# search and result
#
- db = solver.Phase(col_signs + row_signs,
- solver.CHOOSE_FIRST_UNBOUND,
- solver.ASSIGN_MIN_VALUE)
+ db = solver.Phase(
+ col_signs + row_signs,
+ solver.CHOOSE_FIRST_UNBOUND,
+ solver.ASSIGN_MIN_VALUE,
+ )
search_log = solver.SearchLog(100000, total_sum)
solver.NewSearch(db, [objective, search_log])
@@ -164,7 +168,7 @@ def main():
print('col_sums:', [col_sums[j].Value() for j in range(cols)])
for i in range(rows):
for j in range(cols):
- print(x[i, j].Value(),', ')
+ print(x[i, j].Value(), ', ')
print('\n')
print('\n')
diff --git a/examples/tests/issue62.py b/examples/tests/issue62.py
index afb6e0b2caa..5cce214e7e3 100755
--- a/examples/tests/issue62.py
+++ b/examples/tests/issue62.py
@@ -3,26 +3,28 @@
def main():
- solver = pywrapcp.Solver('Ordo')
- tasks = [solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' %i)
- for i in range(3)]
- print(tasks)
- disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
- sequence = []
- sequence.append(disj.SequenceVar())
- solver.Add(disj)
- collector = solver.AllSolutionCollector()
- collector.Add(sequence)
- collector.Add(tasks)
- sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
- intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
- mainPhase = solver.Compose([sequencePhase, intervalPhase])
- solver.Solve(mainPhase, [ collector])
- print(collector.SolutionCount())
- for i in range(collector.SolutionCount()):
- print("Solution " , i)
- print([collector.StartValue(i, tasks[j]) for j in range(3)])
- print([collector.EndValue(i, tasks[j]) for j in range(3)])
+ solver = pywrapcp.Solver('Ordo')
+ tasks = [
+ solver.FixedDurationIntervalVar(0, 25, 5, False, 'Tasks%i' % i)
+ for i in range(3)
+ ]
+ print(tasks)
+ disj = solver.DisjunctiveConstraint(tasks, 'Disjunctive')
+ sequence = []
+ sequence.append(disj.SequenceVar())
+ solver.Add(disj)
+ collector = solver.AllSolutionCollector()
+ collector.Add(sequence)
+ collector.Add(tasks)
+ sequencePhase = solver.Phase(sequence, solver.SEQUENCE_DEFAULT)
+ intervalPhase = solver.Phase(tasks, solver.INTERVAL_DEFAULT)
+ mainPhase = solver.Compose([sequencePhase, intervalPhase])
+ solver.Solve(mainPhase, [collector])
+ print(collector.SolutionCount())
+ for i in range(collector.SolutionCount()):
+ print('Solution ', i)
+ print([collector.StartValue(i, tasks[j]) for j in range(3)])
+ print([collector.EndValue(i, tasks[j]) for j in range(3)])
if __name__ == '__main__':
diff --git a/makefiles/Makefile.cpp.mk b/makefiles/Makefile.cpp.mk
index 3d3e656ba0c..d26ede715f6 100644
--- a/makefiles/Makefile.cpp.mk
+++ b/makefiles/Makefile.cpp.mk
@@ -35,7 +35,7 @@ endif
BUILD_TYPE ?= Release
USE_COINOR ?= ON
USE_GLPK ?= OFF
-USE_HIGHS ?= OFF
+USE_HIGHS ?= ON
USE_PDLP := ON # OFF not supported
USE_SCIP ?= ON
USE_CPLEX ?= OFF
diff --git a/makefiles/Makefile.dotnet.mk b/makefiles/Makefile.dotnet.mk
index a8bc1d53996..0b95e2efad5 100644
--- a/makefiles/Makefile.dotnet.mk
+++ b/makefiles/Makefile.dotnet.mk
@@ -162,7 +162,7 @@ endif
cd $(TEMP_DOTNET_DIR)$S$1$S$$* && "$(DOTNET_BIN)" clean -c Release -v minimal
endef
-DOTNET_SAMPLES := init algorithms graph constraint_solver linear_solver sat util
+DOTNET_SAMPLES := init algorithms graph constraint_solver linear_solver routing sat util
$(foreach sample,$(DOTNET_SAMPLES),$(eval $(call dotnet-sample-target,$(sample))))
# Examples
@@ -307,7 +307,7 @@ endif
cd $(TEMP_DOTNET_DIR)$S$1$S$$* && "$(DOTNET_BIN)" clean -c Release -v minimal
endef
-DOTNET_TESTS := init algorithms graph constraint_solver linear_solver sat util
+DOTNET_TESTS := init algorithms graph constraint_solver linear_solver routing sat util
$(foreach test,$(DOTNET_TESTS),$(eval $(call dotnet-test-target,$(test))))
####################
diff --git a/makefiles/Makefile.java.mk b/makefiles/Makefile.java.mk
index e8e59a5cd0b..da28cac308f 100644
--- a/makefiles/Makefile.java.mk
+++ b/makefiles/Makefile.java.mk
@@ -176,7 +176,7 @@ rjava_%: \
cd $(TEMP_JAVA_DIR)$S$1$S$$* && "$(MVN_BIN)" exec:java $(ARGS)
endef
-JAVA_SAMPLES := init algorithms graph constraint_solver linear_solver sat util
+JAVA_SAMPLES := init algorithms graph constraint_solver linear_solver routing sat util
$(foreach sample,$(JAVA_SAMPLES),$(eval $(call java-sample-target,$(sample),$(subst _,,$(sample)))))
# Examples
@@ -275,7 +275,7 @@ rjava_%: \
cd $(TEMP_JAVA_DIR)$S$1$S$$* && "$(MVN_BIN)" test $(ARGS)
endef
-JAVA_TESTS := init algorithms graph constraint_solver linear_solver sat util
+JAVA_TESTS := init algorithms graph constraint_solver linear_solver routing sat util
$(foreach test,$(JAVA_TESTS),$(eval $(call java-test-target,$(test))))
####################
diff --git a/ortools/algorithms/BUILD.bazel b/ortools/algorithms/BUILD.bazel
index 4f4def32e75..ed81524193e 100644
--- a/ortools/algorithms/BUILD.bazel
+++ b/ortools/algorithms/BUILD.bazel
@@ -240,6 +240,7 @@ cc_test(
"//ortools/base:gmock_main",
"//ortools/util:time_limit",
"@abseil-cpp//absl/base:core_headers",
+ "@abseil-cpp//absl/types:span",
],
)
diff --git a/ortools/algorithms/knapsack_solver_test.cc b/ortools/algorithms/knapsack_solver_test.cc
index 1589f20e437..2c7572aead4 100644
--- a/ortools/algorithms/knapsack_solver_test.cc
+++ b/ortools/algorithms/knapsack_solver_test.cc
@@ -18,6 +18,7 @@
#include
#include "absl/base/macros.h"
+#include "absl/types/span.h"
#include "gtest/gtest.h"
#include "ortools/util/time_limit.h"
@@ -26,8 +27,8 @@ namespace {
const int kInvalidSolution = -1;
-bool IsSolutionValid(const std::vector<int64_t>& profits,
-                     const std::vector<std::vector<int64_t> >& weights,
+bool IsSolutionValid(absl::Span<const int64_t> profits,
+                     absl::Span<const std::vector<int64_t>> weights,
                      const std::vector<int64_t>& capacities,
                      const std::vector<bool>& best_solution,
                      int64_t optimal_profit) {
@@ -59,7 +60,7 @@ int64_t SolveKnapsackProblemUsingSpecificSolverAndReduction(
  std::vector<int64_t> profits(profit_array, profit_array + number_of_items);
  std::vector<int64_t> capacities(capacity_array,
                                  capacity_array + number_of_dimensions);
-  std::vector<std::vector<int64_t> > weights;
+  std::vector<std::vector<int64_t>> weights;
  for (int i = 0; i < number_of_dimensions; ++i) {
    const int64_t* one_dimension = weight_array + number_of_items * i;
    std::vector<int64_t> weights_one_dimension(one_dimension,
@@ -484,7 +485,7 @@ TEST(KnapsackSolverTest, SolveTwoDimensionsSettingPrimaryPropagator) {
  std::vector<int64_t> profits(kProfitArray, kProfitArray + kArraySize);
  std::vector<int64_t> capacities(kCapacityArray,
                                  kCapacityArray + kNumberOfDimensions);
-  std::vector<std::vector<int64_t> > weights;
+  std::vector<std::vector<int64_t>> weights;
  for (int i = 0; i < kNumberOfDimensions; ++i) {
    const int64_t* one_dimension = kWeightArray + kArraySize * i;
    std::vector<int64_t> weights_one_dimension(one_dimension,
diff --git a/ortools/algorithms/n_choose_k_test.cc b/ortools/algorithms/n_choose_k_test.cc
index 3c5c86bfe3f..2d35c7c28df 100644
--- a/ortools/algorithms/n_choose_k_test.cc
+++ b/ortools/algorithms/n_choose_k_test.cc
@@ -28,14 +28,14 @@
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
#include "ortools/base/dump_vars.h"
-//#include "ortools/base/fuzztest.h"
+#include "ortools/base/fuzztest.h"
#include "ortools/base/gmock.h"
#include "ortools/base/mathutil.h"
#include "ortools/util/flat_matrix.h"
namespace operations_research {
namespace {
-//using ::fuzztest::NonNegative;
+using ::fuzztest::NonNegative;
using ::testing::HasSubstr;
using ::testing::status::IsOkAndHolds;
using ::testing::status::StatusIs;
@@ -271,12 +271,11 @@ void MatchesLogCombinations(int n, int k) {
<< " (value: " << approx << "), which fits in int64_t";
}
}
-/*
FUZZ_TEST(NChooseKTest, MatchesLogCombinations)
// Ideally we'd test with `uint64_t`, but `LogCombinations` only accepts
// `int`.
.WithDomains(NonNegative(), NonNegative());
-*/
+
template
void BM_NChooseK(benchmark::State& state) {
static constexpr int kNumInputs = 1000;
diff --git a/ortools/algorithms/python/knapsack_solver_test.py b/ortools/algorithms/python/knapsack_solver_test.py
index 95c990183b2..17ace59fd65 100755
--- a/ortools/algorithms/python/knapsack_solver_test.py
+++ b/ortools/algorithms/python/knapsack_solver_test.py
@@ -23,249 +23,247 @@
class PyWrapAlgorithmsKnapsackSolverTest(absltest.TestCase):
- def RealSolve(self, profits, weights, capacities, solver_type, use_reduction):
- solver = knapsack_solver.KnapsackSolver(solver_type, "solver")
- solver.set_use_reduction(use_reduction)
- solver.init(profits, weights, capacities)
- profit = solver.solve()
+ def RealSolve(self, profits, weights, capacities, solver_type, use_reduction):
+ solver = knapsack_solver.KnapsackSolver(solver_type, "solver")
+ solver.set_use_reduction(use_reduction)
+ solver.init(profits, weights, capacities)
+ profit = solver.solve()
- return profit
+ return profit
- def SolveKnapsackProblemUsingSpecificSolver(
- self, profits, weights, capacities, solver_type
- ):
- result_when_reduction = self.RealSolve(
- profits, weights, capacities, solver_type, True
- )
- result_when_no_reduction = self.RealSolve(
- profits, weights, capacities, solver_type, False
- )
+ def SolveKnapsackProblemUsingSpecificSolver(
+ self, profits, weights, capacities, solver_type
+ ):
+ result_when_reduction = self.RealSolve(
+ profits, weights, capacities, solver_type, True
+ )
+ result_when_no_reduction = self.RealSolve(
+ profits, weights, capacities, solver_type, False
+ )
- if result_when_reduction == result_when_no_reduction:
- return result_when_reduction
- else:
- return self._invalid_solution
+ if result_when_reduction == result_when_no_reduction:
+ return result_when_reduction
+ else:
+ return self._invalid_solution
- def SolveKnapsackProblem(self, profits, weights, capacities):
- self._invalid_solution = -1
- max_number_of_items_for_brute_force = 15
- max_number_of_items_for_divide_and_conquer = 32
- max_number_of_items_for_64_items_solver = 64
- number_of_items = len(profits)
- # This test is ran as size = 'small. To be fast enough, the dynamic
- # programming solver should be limited to instances with capacities smaller
- # than 10^6.
- max_capacity_for_dynamic_programming_solver = 1000000
- generic_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,
- )
+ def SolveKnapsackProblem(self, profits, weights, capacities):
+ self._invalid_solution = -1
+ max_number_of_items_for_brute_force = 15
+ max_number_of_items_for_divide_and_conquer = 32
+ max_number_of_items_for_64_items_solver = 64
+ number_of_items = len(profits)
+ # This test is ran as size = 'small. To be fast enough, the dynamic
+ # programming solver should be limited to instances with capacities smaller
+ # than 10^6.
+ max_capacity_for_dynamic_programming_solver = 1000000
+ generic_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,
+ )
- if generic_profit == self._invalid_solution:
- return self._invalid_solution
+ if generic_profit == self._invalid_solution:
+ return self._invalid_solution
- # Disabled due to ASAN raising a runtime error:
- # outside the range of representable values of type 'int'
- # cbc_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- # profits,
- # weights,
- # capacities,
- # knapsack_solver.SolverType.
- # KNAPSACK_MULTIDIMENSION_CBC_MIP_SOLVER)
- # if cbc_profit != generic_profit:
- # return self._invalid_solution
+ # Disabled due to ASAN raising a runtime error:
+ # outside the range of representable values of type 'int'
+ # cbc_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ # profits,
+ # weights,
+ # capacities,
+ # knapsack_solver.SolverType.
+ # KNAPSACK_MULTIDIMENSION_CBC_MIP_SOLVER)
+ # if cbc_profit != generic_profit:
+ # return self._invalid_solution
- try:
- scip_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_SCIP_MIP_SOLVER,
- )
- if scip_profit != generic_profit:
- return self._invalid_solution
- except AttributeError:
- print("SCIP support not compiled in")
+ try:
+ scip_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_SCIP_MIP_SOLVER,
+ )
+ if scip_profit != generic_profit:
+ return self._invalid_solution
+ except AttributeError:
+ print("SCIP support not compiled in")
- if len(weights) > 1:
- return generic_profit
+ if len(weights) > 1:
+ return generic_profit
- if number_of_items <= max_number_of_items_for_brute_force:
- brute_force_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_BRUTE_FORCE_SOLVER,
- )
- if brute_force_profit != generic_profit:
- return self._invalid_solution
+ if number_of_items <= max_number_of_items_for_brute_force:
+ brute_force_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_BRUTE_FORCE_SOLVER,
+ )
+ if brute_force_profit != generic_profit:
+ return self._invalid_solution
- if number_of_items <= max_number_of_items_for_64_items_solver:
- items64_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_64ITEMS_SOLVER,
- )
- if items64_profit != generic_profit:
- return self._invalid_solution
+ if number_of_items <= max_number_of_items_for_64_items_solver:
+ items64_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_64ITEMS_SOLVER,
+ )
+ if items64_profit != generic_profit:
+ return self._invalid_solution
- if capacities[0] <= max_capacity_for_dynamic_programming_solver:
- dynamic_programming_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER,
- )
- if dynamic_programming_profit != generic_profit:
- return self._invalid_solution
+ if capacities[0] <= max_capacity_for_dynamic_programming_solver:
+ dynamic_programming_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER,
+ )
+ if dynamic_programming_profit != generic_profit:
+ return self._invalid_solution
- if number_of_items <= max_number_of_items_for_divide_and_conquer:
- divide_and_conquer_profit = self.SolveKnapsackProblemUsingSpecificSolver(
- profits,
- weights,
- capacities,
- knapsack_solver.SolverType.KNAPSACK_DIVIDE_AND_CONQUER_SOLVER,
- )
- if divide_and_conquer_profit != generic_profit:
- return self._invalid_solution
+ if number_of_items <= max_number_of_items_for_divide_and_conquer:
+ divide_and_conquer_profit = self.SolveKnapsackProblemUsingSpecificSolver(
+ profits,
+ weights,
+ capacities,
+ knapsack_solver.SolverType.KNAPSACK_DIVIDE_AND_CONQUER_SOLVER,
+ )
+ if divide_and_conquer_profit != generic_profit:
+ return self._invalid_solution
- return generic_profit
+ return generic_profit
- def testSolveOneDimension(self):
- profits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
- weights = [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
- capacities = [34]
- optimal_profit = 34
- profit = self.SolveKnapsackProblem(profits, weights, capacities)
- self.assertEqual(optimal_profit, profit)
+ def testSolveOneDimension(self):
+ profits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ weights = [[1, 2, 3, 4, 5, 6, 7, 8, 9]]
+ capacities = [34]
+ optimal_profit = 34
+ profit = self.SolveKnapsackProblem(profits, weights, capacities)
+ self.assertEqual(optimal_profit, profit)
- def testSolveTwoDimensions(self):
- profits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
- weights = [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1]]
- capacities = [34, 4]
- optimal_profit = 30
- profit = self.SolveKnapsackProblem(profits, weights, capacities)
- self.assertEqual(optimal_profit, profit)
+ def testSolveTwoDimensions(self):
+ profits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
+ weights = [[1, 2, 3, 4, 5, 6, 7, 8, 9], [1, 1, 1, 1, 1, 1, 1, 1, 1]]
+ capacities = [34, 4]
+ optimal_profit = 30
+ profit = self.SolveKnapsackProblem(profits, weights, capacities)
+ self.assertEqual(optimal_profit, profit)
- def testSolveBigOneDimension(self):
- profits = [
- 360,
- 83,
- 59,
- 130,
- 431,
- 67,
- 230,
- 52,
- 93,
- 125,
- 670,
- 892,
- 600,
- 38,
- 48,
- 147,
- 78,
- 256,
- 63,
- 17,
- 120,
- 164,
- 432,
- 35,
- 92,
- 110,
- 22,
- 42,
- 50,
- 323,
- 514,
- 28,
- 87,
- 73,
- 78,
- 15,
- 26,
- 78,
- 210,
- 36,
- 85,
- 189,
- 274,
- 43,
- 33,
- 10,
- 19,
- 389,
- 276,
- 312,
- ]
- weights = [
- [
- 7,
- 0,
- 30,
- 22,
- 80,
- 94,
- 11,
- 81,
- 70,
- 64,
- 59,
- 18,
- 0,
- 36,
- 3,
- 8,
- 15,
- 42,
- 9,
- 0,
- 42,
- 47,
- 52,
- 32,
- 26,
- 48,
- 55,
- 6,
- 29,
- 84,
- 2,
- 4,
- 18,
- 56,
- 7,
- 29,
- 93,
- 44,
- 71,
- 3,
- 86,
- 66,
- 31,
- 65,
- 0,
- 79,
- 20,
- 65,
- 52,
- 13,
- ]
- ]
- capacities = [850]
- optimal_profit = 7534
- profit = self.SolveKnapsackProblem(profits, weights, capacities)
- self.assertEqual(optimal_profit, profit)
+ def testSolveBigOneDimension(self):
+ profits = [
+ 360,
+ 83,
+ 59,
+ 130,
+ 431,
+ 67,
+ 230,
+ 52,
+ 93,
+ 125,
+ 670,
+ 892,
+ 600,
+ 38,
+ 48,
+ 147,
+ 78,
+ 256,
+ 63,
+ 17,
+ 120,
+ 164,
+ 432,
+ 35,
+ 92,
+ 110,
+ 22,
+ 42,
+ 50,
+ 323,
+ 514,
+ 28,
+ 87,
+ 73,
+ 78,
+ 15,
+ 26,
+ 78,
+ 210,
+ 36,
+ 85,
+ 189,
+ 274,
+ 43,
+ 33,
+ 10,
+ 19,
+ 389,
+ 276,
+ 312,
+ ]
+ weights = [[
+ 7,
+ 0,
+ 30,
+ 22,
+ 80,
+ 94,
+ 11,
+ 81,
+ 70,
+ 64,
+ 59,
+ 18,
+ 0,
+ 36,
+ 3,
+ 8,
+ 15,
+ 42,
+ 9,
+ 0,
+ 42,
+ 47,
+ 52,
+ 32,
+ 26,
+ 48,
+ 55,
+ 6,
+ 29,
+ 84,
+ 2,
+ 4,
+ 18,
+ 56,
+ 7,
+ 29,
+ 93,
+ 44,
+ 71,
+ 3,
+ 86,
+ 66,
+ 31,
+ 65,
+ 0,
+ 79,
+ 20,
+ 65,
+ 52,
+ 13,
+ ]]
+ capacities = [850]
+ optimal_profit = 7534
+ profit = self.SolveKnapsackProblem(profits, weights, capacities)
+ self.assertEqual(optimal_profit, profit)
def main(_):
- absltest.main()
+ absltest.main()
if __name__ == "__main__":
- app.run(main)
+ app.run(main)
diff --git a/ortools/algorithms/samples/knapsack.py b/ortools/algorithms/samples/knapsack.py
index eb63388c4ef..84da3b67962 100644
--- a/ortools/algorithms/samples/knapsack.py
+++ b/ortools/algorithms/samples/knapsack.py
@@ -16,58 +16,59 @@
# [START program]
# [START import]
from ortools.algorithms.python import knapsack_solver
+
# [END import]
def main():
- # Create the solver.
- # [START solver]
- solver = knapsack_solver.KnapsackSolver(
- knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,
- "KnapsackExample",
- )
- # [END solver]
+ # Create the solver.
+ # [START solver]
+ solver = knapsack_solver.KnapsackSolver(
+ knapsack_solver.SolverType.KNAPSACK_MULTIDIMENSION_BRANCH_AND_BOUND_SOLVER,
+ "KnapsackExample",
+ )
+ # [END solver]
- # [START data]
- values = [
- # fmt:off
+ # [START data]
+ values = [
+ # fmt:off
360, 83, 59, 130, 431, 67, 230, 52, 93, 125, 670, 892, 600, 38, 48, 147,
78, 256, 63, 17, 120, 164, 432, 35, 92, 110, 22, 42, 50, 323, 514, 28,
87, 73, 78, 15, 26, 78, 210, 36, 85, 189, 274, 43, 33, 10, 19, 389, 276,
312
- # fmt:on
- ]
- weights = [
- # fmt: off
+ # fmt:on
+ ]
+ weights = [
+ # fmt: off
[7, 0, 30, 22, 80, 94, 11, 81, 70, 64, 59, 18, 0, 36, 3, 8, 15, 42, 9, 0,
42, 47, 52, 32, 26, 48, 55, 6, 29, 84, 2, 4, 18, 56, 7, 29, 93, 44, 71,
3, 86, 66, 31, 65, 0, 79, 20, 65, 52, 13],
- # fmt: on
- ]
- capacities = [850]
- # [END data]
+ # fmt: on
+ ]
+ capacities = [850]
+ # [END data]
- # [START solve]
- solver.init(values, weights, capacities)
- computed_value = solver.solve()
- # [END solve]
+ # [START solve]
+ solver.init(values, weights, capacities)
+ computed_value = solver.solve()
+ # [END solve]
- # [START print_solution]
- packed_items = []
- packed_weights = []
- total_weight = 0
- print("Total value =", computed_value)
- for i in range(len(values)):
- if solver.best_solution_contains(i):
- packed_items.append(i)
- packed_weights.append(weights[0][i])
- total_weight += weights[0][i]
- print("Total weight:", total_weight)
- print("Packed items:", packed_items)
- print("Packed_weights:", packed_weights)
- # [END print_solution]
+ # [START print_solution]
+ packed_items = []
+ packed_weights = []
+ total_weight = 0
+ print("Total value =", computed_value)
+ for i in range(len(values)):
+ if solver.best_solution_contains(i):
+ packed_items.append(i)
+ packed_weights.append(weights[0][i])
+ total_weight += weights[0][i]
+ print("Total weight:", total_weight)
+ print("Packed items:", packed_items)
+ print("Packed_weights:", packed_weights)
+ # [END print_solution]
if __name__ == "__main__":
- main()
+ main()
# [END program]
diff --git a/ortools/algorithms/samples/simple_knapsack_program.py b/ortools/algorithms/samples/simple_knapsack_program.py
index 03ab5729d3c..0e5f0c5546d 100644
--- a/ortools/algorithms/samples/simple_knapsack_program.py
+++ b/ortools/algorithms/samples/simple_knapsack_program.py
@@ -16,45 +16,46 @@
"""A simple knapsack problem."""
# [START import]
from ortools.algorithms.python import knapsack_solver
+
# [END import]
def main():
- # Create the solver.
- # [START solver]
- solver = knapsack_solver.KnapsackSolver(
- knapsack_solver.SolverType.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER,
- "test",
- )
- # [END solver]
-
- # [START data]
- weights = [
- # fmt:off
+ # Create the solver.
+ # [START solver]
+ solver = knapsack_solver.KnapsackSolver(
+ knapsack_solver.SolverType.KNAPSACK_DYNAMIC_PROGRAMMING_SOLVER,
+ "test",
+ )
+ # [END solver]
+
+ # [START data]
+ weights = [
+ # fmt:off
[565, 406, 194, 130, 435, 367, 230, 315, 393, 125, 670, 892, 600, 293, 712, 147, 421, 255],
- # fmt:on
- ]
- capacities = [850]
- values = weights[0]
- # [END data]
+ # fmt:on
+ ]
+ capacities = [850]
+ values = weights[0]
+ # [END data]
- # [START solve]
- solver.init(values, weights, capacities)
- computed_value = solver.solve()
- # [END solve]
+ # [START solve]
+ solver.init(values, weights, capacities)
+ computed_value = solver.solve()
+ # [END solve]
- # [START print_solution]
- packed_items = [
- x for x in range(0, len(weights[0])) if solver.best_solution_contains(x)
- ]
- packed_weights = [weights[0][i] for i in packed_items]
+ # [START print_solution]
+ packed_items = [
+ x for x in range(0, len(weights[0])) if solver.best_solution_contains(x)
+ ]
+ packed_weights = [weights[0][i] for i in packed_items]
- print("Packed items: ", packed_items)
- print("Packed weights: ", packed_weights)
- print("Total weight (same as total value): ", computed_value)
- # [END print_solution]
+ print("Packed items: ", packed_items)
+ print("Packed weights: ", packed_weights)
+ print("Total weight (same as total value): ", computed_value)
+ # [END print_solution]
if __name__ == "__main__":
- main()
+ main()
# [END program]
diff --git a/ortools/base/BUILD.bazel b/ortools/base/BUILD.bazel
index f5750c3e04b..568647b0e26 100644
--- a/ortools/base/BUILD.bazel
+++ b/ortools/base/BUILD.bazel
@@ -74,8 +74,8 @@ cc_library(
"version.h",
],
copts = [
- "-DOR_TOOLS_MAJOR=9",
- "-DOR_TOOLS_MINOR=13",
+ "-DOR_TOOLS_MAJOR=10",
+ "-DOR_TOOLS_MINOR=0",
"-DOR_TOOLS_PATCH=9999",
],
linkopts = select({
@@ -183,6 +183,8 @@ cc_library(
"//conditions:default": [],
}),
deps = [
+ ":strong_int",
+ ":strong_vector",
"@abseil-cpp//absl/container:inlined_vector",
],
)
@@ -199,27 +201,13 @@ cc_test(
}),
deps = [
":dump_vars",
+ ":strong_int",
+ ":strong_vector",
"@abseil-cpp//absl/strings",
"@googletest//:gtest_main",
],
)
-cc_library(
- name = "dynamic_library",
- hdrs = ["dynamic_library.h"],
- linkopts = select({
- "on_linux": ["-Wl,--no-as-needed -ldl"],
- "on_macos": [],
- "on_windows": [],
- "//conditions:default": [],
- }),
- deps = [
- ":base",
- ":logging",
- "@abseil-cpp//absl/strings",
- ],
-)
-
cc_library(
name = "encodingutils",
hdrs = ["encodingutils.h"],
diff --git a/ortools/base/dump_vars.h b/ortools/base/dump_vars.h
index 8413948cd39..b2814c2e539 100644
--- a/ortools/base/dump_vars.h
+++ b/ortools/base/dump_vars.h
@@ -48,6 +48,8 @@
#include
#include "absl/container/inlined_vector.h"
+#include "ortools/base/strong_int.h"
+#include "ortools/base/strong_vector.h"
/* need extra level to force extra eval */
#define DUMP_FOR_EACH_N0(F)
std::ostream& operator<<(std::ostream& os, const ::std::optional<T>& opt) {
return os;
}
+// needed by graph tests
+template <typename T, typename U>
+std::ostream& operator<<(std::ostream& os,
+                         const ::util_intops::StrongVector<T, U>& vec) {
+ for (U it : vec) {
+ os << ::std::to_string(it) << ',';
+ }
+ return os;
+}
+
using DumpNames = ::std::vector<::std::string>;
struct print_fields {
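The overload added above prints every element with `std::to_string` followed by a trailing comma, which is the format the DUMP_VARS tests below expect. A standalone sketch of the same formatting idea (plain `std::vector` and an illustrative helper name, so it compiles without the strong-type headers):

```cpp
#include <iostream>
#include <string>
#include <vector>

// Streams each element as std::to_string(element) plus a trailing ',',
// mirroring the StrongVector overload added to dump_vars.h.
template <typename T>
std::ostream& PrintCommaTerminated(std::ostream& os, const std::vector<T>& values) {
  for (const T& value : values) {
    os << std::to_string(value) << ',';
  }
  return os;
}

int main() {
  const std::vector<float> vec = {49.3f, 3.14f};
  PrintCommaTerminated(std::cout, vec) << '\n';  // prints "49.299999,3.140000,"
}
```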
diff --git a/ortools/base/dump_vars_test.cc b/ortools/base/dump_vars_test.cc
index 1a295f386a5..81b4e5ae8d7 100644
--- a/ortools/base/dump_vars_test.cc
+++ b/ortools/base/dump_vars_test.cc
@@ -21,6 +21,12 @@
#include
#include "gtest/gtest.h"
+#include "ortools/base/strong_int.h"
+#include "ortools/base/strong_vector.h"
+
+namespace util_intops {
+DEFINE_STRONG_INT_TYPE(CustomStrongInt, uint32_t);
+} // namespace util_intops
namespace operations_research::base {
namespace {
@@ -124,6 +130,19 @@ TEST(DumpVars, Vector) {
EXPECT_EQ("vec = 49.299999,3.140000,", DUMP_VARS(vec).str());
}
+TEST(DumpVars, StrongInt) {
+ ::util_intops::CustomStrongInt val(42);
+ EXPECT_EQ(R"(val = 42)", ToString(DUMP_VARS(val)));
+ EXPECT_EQ(R"(val = 42)", DUMP_VARS(val).str());
+}
+
+TEST(DumpVars, StrongVector) {
+ ::util_intops::StrongVector<::util_intops::CustomStrongInt, float> vec = {
+ 49.3, 3.14};
+ EXPECT_EQ(R"(vec = 49.299999,3.140000,)", ToString(DUMP_VARS(vec)));
+ EXPECT_EQ(R"(vec = 49.299999,3.140000,)", DUMP_VARS(vec).str());
+}
+
TEST(DumpVars, Optional) {
std::optional of = {};
EXPECT_EQ("of = (none)", ToString(DUMP_VARS(of)));
diff --git a/ortools/base/proto_enum_utils.h b/ortools/base/proto_enum_utils.h
index a78dd61a72c..bdf03310564 100644
--- a/ortools/base/proto_enum_utils.h
+++ b/ortools/base/proto_enum_utils.h
@@ -175,8 +175,19 @@ namespace internal {
template <typename E>
class RepeatedEnumView {
 public:
-  class Iterator : public std::iterator<std::input_iterator_tag, E> {
+  class Iterator
+#if __cplusplus < 201703L
+      : public std::iterator<std::input_iterator_tag, E>
+#endif
+  {
public:
+ using difference_type = ptrdiff_t;
+ using value_type = E;
+#if __cplusplus >= 201703L
+ using iterator_category = std::input_iterator_tag;
+ using pointer = E*;
+ using reference = E&;
+#endif
    explicit Iterator(RepeatedField<int>::const_iterator ptr) : ptr_(ptr) {}
bool operator==(const Iterator& it) const { return ptr_ == it.ptr_; }
bool operator!=(const Iterator& it) const { return ptr_ != it.ptr_; }
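Since `std::iterator` was deprecated in C++17, the hunk above keeps it only for pre-C++17 builds and otherwise declares the iterator traits as member type aliases. A self-contained sketch of that post-C++17 pattern, with toy names that are purely illustrative and not part of the patch:

```cpp
#include <cstddef>
#include <iostream>
#include <iterator>
#include <type_traits>

// A minimal input iterator over a C array of ints that exposes its traits
// through member aliases instead of inheriting from std::iterator.
class IntSpanIterator {
 public:
  using difference_type = std::ptrdiff_t;
  using value_type = int;
  using pointer = const int*;
  using reference = const int&;
  using iterator_category = std::input_iterator_tag;

  explicit IntSpanIterator(const int* ptr) : ptr_(ptr) {}
  bool operator==(const IntSpanIterator& other) const { return ptr_ == other.ptr_; }
  bool operator!=(const IntSpanIterator& other) const { return ptr_ != other.ptr_; }
  int operator*() const { return *ptr_; }
  IntSpanIterator& operator++() { ++ptr_; return *this; }

 private:
  const int* ptr_;
};

int main() {
  const int values[] = {1, 2, 3};
  // std::iterator_traits picks up the member aliases declared above.
  static_assert(std::is_same_v<std::iterator_traits<IntSpanIterator>::value_type, int>);
  for (IntSpanIterator it(values); it != IntSpanIterator(values + 3); ++it) {
    std::cout << *it << ' ';
  }
  std::cout << '\n';  // prints "1 2 3"
}
```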
diff --git a/ortools/constraint_solver/BUILD.bazel b/ortools/constraint_solver/BUILD.bazel
index 11956f4539e..33bf13b8134 100644
--- a/ortools/constraint_solver/BUILD.bazel
+++ b/ortools/constraint_solver/BUILD.bazel
@@ -186,7 +186,6 @@ cc_library(
":search_limit_cc_proto",
":search_stats_cc_proto",
":solver_parameters_cc_proto",
- ":routing_parameters_cc_proto",
"//ortools/base",
"//ortools/base:base_export",
"//ortools/base:bitmap",
@@ -230,265 +229,3 @@ cc_library(
"@abseil-cpp//absl/types:span",
],
)
-
-# ----- Routing and ArcRouting -----
-
-proto_library(
- name = "routing_enums_proto",
- srcs = ["routing_enums.proto"],
-)
-
-cc_proto_library(
- name = "routing_enums_cc_proto",
- deps = [":routing_enums_proto"],
-)
-
-java_proto_library(
- name = "routing_enums_java_proto",
- deps = [":routing_enums_proto"],
-)
-
-proto_library(
- name = "routing_ils_proto",
- srcs = ["routing_ils.proto"],
- deps = [":routing_enums_proto"],
-)
-
-cc_proto_library(
- name = "routing_ils_cc_proto",
- deps = [":routing_ils_proto"],
-)
-
-py_proto_library(
- name = "routing_ils_py_pb2",
- deps = [":routing_ils_proto"],
-)
-
-java_proto_library(
- name = "routing_ils_java_proto",
- deps = [":routing_ils_proto"],
-)
-
-proto_library(
- name = "routing_parameters_proto",
- srcs = ["routing_parameters.proto"],
- deps = [
- ":routing_enums_proto",
- ":routing_ils_proto",
- ":solver_parameters_proto",
- "//ortools/sat:sat_parameters_proto",
- "//ortools/util:optional_boolean_proto",
- "@protobuf//:duration_proto",
- ],
-)
-
-cc_proto_library(
- name = "routing_parameters_cc_proto",
- deps = [":routing_parameters_proto"],
-)
-
-java_proto_library(
- name = "routing_parameters_java_proto",
- deps = [":routing_parameters_proto"],
-)
-
-py_proto_library(
- name = "routing_parameters_py_pb2",
- deps = [":routing_parameters_proto"],
-)
-
-py_proto_library(
- name = "routing_enums_py_pb2",
- deps = [":routing_enums_proto"],
-)
-
-cc_library(
- name = "routing_parameters",
- srcs = ["routing_parameters.cc"],
- hdrs = ["routing_parameters.h"],
- deps = [
- ":cp",
- ":routing_enums_cc_proto",
- ":routing_ils_cc_proto",
- ":routing_parameters_cc_proto",
- ":routing_parameters_utils",
- ":solver_parameters_cc_proto",
- "//ortools/base",
- "//ortools/base:proto_enum_utils",
- "//ortools/base:protoutil",
- "//ortools/base:types",
- "//ortools/port:proto_utils",
- "//ortools/sat:sat_parameters_cc_proto",
- "//ortools/util:optional_boolean_cc_proto",
- "//ortools/util:testing_utils",
- "@abseil-cpp//absl/container:flat_hash_map",
- "@abseil-cpp//absl/strings",
- "@abseil-cpp//absl/strings:str_format",
- "@abseil-cpp//absl/time",
- "@protobuf//:protobuf",
- ],
-)
-
-cc_library(
- name = "routing_parameters_utils",
- srcs = ["routing_parameters_utils.cc"],
- hdrs = ["routing_parameters_utils.h"],
- deps = [
- ":routing_parameters_cc_proto",
- "//ortools/util:optional_boolean_cc_proto",
- "@abseil-cpp//absl/types:span",
- ],
-)
-
-cc_library(
- name = "routing_types",
- hdrs = ["routing_types.h"],
- deps = [
- "//ortools/base:int_type",
- "//ortools/util:piecewise_linear_function",
- ],
-)
-
-cc_library(
- name = "routing_utils",
- srcs = ["routing_utils.cc"],
- hdrs = ["routing_utils.h"],
- visibility = ["//visibility:public"],
- deps = [
- "//ortools/util:saturated_arithmetic",
- "@abseil-cpp//absl/log:check",
- "@abseil-cpp//absl/types:span",
- ],
-)
-
-cc_library(
- name = "routing_neighborhoods",
- srcs = ["routing_neighborhoods.cc"],
- hdrs = ["routing_neighborhoods.h"],
- visibility = ["//visibility:public"],
- deps = [
- ":cp",
- ":routing_types",
- ":routing_utils",
- "//ortools/base:types",
- "//ortools/util:bitset",
- "//ortools/util:saturated_arithmetic",
- "@abseil-cpp//absl/log:check",
- "@abseil-cpp//absl/types:span",
- ],
-)
-
-cc_library(
- name = "routing_index_manager",
- srcs = ["routing_index_manager.cc"],
- hdrs = ["routing_index_manager.h"],
- deps = [
- ":routing_types",
- "//ortools/base",
- "//ortools/base:strong_vector",
- "//ortools/base:types",
- "@abseil-cpp//absl/container:flat_hash_set",
- "@abseil-cpp//absl/log:check",
- "@abseil-cpp//absl/types:span",
- ],
-)
-
-cc_library(
- name = "routing",
- srcs = [
- "routing.cc",
- "routing_breaks.cc",
- "routing_constraints.cc",
- "routing_decision_builders.cc",
- "routing_filters.cc",
- "routing_flow.cc",
- "routing_ils.cc",
- "routing_insertion_lns.cc",
- "routing_lp_scheduling.cc",
- "routing_sat.cc",
- "routing_search.cc",
- ],
- hdrs = [
- "routing.h",
- "routing_constraints.h",
- "routing_decision_builders.h",
- "routing_filter_committables.h",
- "routing_filters.h",
- "routing_ils.h",
- "routing_insertion_lns.h",
- "routing_lp_scheduling.h",
- "routing_search.h",
- ],
- copts = select({
- "on_linux": [],
- "on_macos": [],
- "on_windows": ["/Zc:preprocessor"],
- "//conditions:default": [],
- }),
- deps = [
- ":cp",
- ":routing_enums_cc_proto",
- ":routing_index_manager",
- ":routing_neighborhoods",
- ":routing_parameters",
- ":routing_parameters_cc_proto",
- ":routing_types",
- ":routing_utils",
- ":solver_parameters_cc_proto",
- "//ortools/base",
- "//ortools/base:dump_vars",
- "//ortools/base:int_type",
- "//ortools/base:map_util",
- "//ortools/base:mathutil",
- "//ortools/base:protoutil",
- "//ortools/base:stl_util",
- "//ortools/base:strong_vector",
- "//ortools/base:types",
- "//ortools/glop:lp_solver",
- "//ortools/glop:parameters_cc_proto",
- "//ortools/graph",
- "//ortools/graph:christofides",
- "//ortools/graph:connected_components",
- "//ortools/graph:linear_assignment",
- "//ortools/graph:min_cost_flow",
- "//ortools/lp_data",
- "//ortools/lp_data:base",
- "//ortools/port:proto_utils",
- "//ortools/sat:cp_model_cc_proto",
- "//ortools/sat:cp_model_solver",
- "//ortools/sat:integer",
- "//ortools/sat:lp_utils",
- "//ortools/sat:model",
- "//ortools/sat:sat_parameters_cc_proto",
- "//ortools/util:bitset",
- "//ortools/util:flat_matrix",
- "//ortools/util:optional_boolean_cc_proto",
- "//ortools/util:piecewise_linear_function",
- "//ortools/util:range_minimum_query",
- "//ortools/util:range_query_function",
- "//ortools/util:saturated_arithmetic",
- "//ortools/util:sorted_interval_list",
- "//ortools/util:scheduling",
- "//ortools/util:time_limit",
- "@abseil-cpp//absl/algorithm:container",
- "@abseil-cpp//absl/base:core_headers",
- "@abseil-cpp//absl/container:btree",
- "@abseil-cpp//absl/container:flat_hash_map",
- "@abseil-cpp//absl/container:flat_hash_set",
- "@abseil-cpp//absl/container:inlined_vector",
- "@abseil-cpp//absl/flags:flag",
- "@abseil-cpp//absl/functional:bind_front",
- "@abseil-cpp//absl/hash",
- "@abseil-cpp//absl/log",
- "@abseil-cpp//absl/log:check",
- "@abseil-cpp//absl/log:die_if_null",
- "@abseil-cpp//absl/memory",
- "@abseil-cpp//absl/status:statusor",
- "@abseil-cpp//absl/strings",
- "@abseil-cpp//absl/strings:str_format",
- "@abseil-cpp//absl/time",
- "@abseil-cpp//absl/types:span",
- "@protobuf//:protobuf",
- ],
-)
-
diff --git a/ortools/constraint_solver/README.md b/ortools/constraint_solver/README.md
index 0a6688c8146..70a560091c1 100644
--- a/ortools/constraint_solver/README.md
+++ b/ortools/constraint_solver/README.md
@@ -34,26 +34,8 @@ important for performance.
## Routing solver
[Vehicle Routing](http://en.wikipedia.org/wiki/Vehicle_routing) is a useful
-extension that is implemented on top of the CP solver library.
-
-To begin, skim:
-
-* [routing.h](../constraint_solver/routing.h):
-The vehicle routing library lets one model and solve generic vehicle routing
-problems ranging from the Traveling Salesman Problem to more complex problems
-such as the Capacitated Vehicle Routing Problem with Time Windows.
-
-### Parameters
-
-* [routing_parameters.proto](../constraint_solver/routing_parameters.proto):
-The Vehicle Routing solver parameters.
-* [routing_enums.proto](../constraint_solver/routing_enums.proto):
-Enums used to define routing parameters.
-
-### Solution
-
-* [assignment.proto](assignment.proto):
-Holds the solution of a Routing problem.
+extension that is implemented on top of the CP solver library. It is now
+available as [a separate module](../routing/README.md).
## Recipes
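For C++ users the practical effect of this move is an include-path change. The sketch below assumes the headers keep their former names under `ortools/routing/`; this patch only removes the old targets, so the new paths shown here are an assumption rather than something stated in the diff:

```cpp
// Before: routing lived inside the constraint_solver package.
// #include "ortools/constraint_solver/routing.h"
// #include "ortools/constraint_solver/routing_index_manager.h"
// #include "ortools/constraint_solver/routing_parameters.h"

// After (assumed locations in the standalone routing module):
#include "ortools/routing/routing.h"
#include "ortools/routing/routing_index_manager.h"
#include "ortools/routing/routing_parameters.h"
```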
diff --git a/ortools/constraint_solver/constraint_solver.cc b/ortools/constraint_solver/constraint_solver.cc
index d739d2744c9..de6ebe007f2 100644
--- a/ortools/constraint_solver/constraint_solver.cc
+++ b/ortools/constraint_solver/constraint_solver.cc
@@ -1024,7 +1024,7 @@ class Search {
bool AtSolution();
bool AcceptSolution();
void NoMoreSolutions();
- bool LocalOptimum();
+ bool ContinueAtLocalOptimum();
bool AcceptDelta(Assignment* delta, Assignment* deltadelta);
void AcceptNeighbor();
void AcceptUncheckedNeighbor();
@@ -1316,15 +1316,15 @@ bool Search::AtSolution() {
void Search::NoMoreSolutions() { CALL_EVENT_LISTENERS(NoMoreSolutions); }
-bool Search::LocalOptimum() {
- bool at_local_optimum = false;
+bool Search::ContinueAtLocalOptimum() {
+ bool continue_at_local_optimum = false;
for (SearchMonitor* const monitor :
GetEventListeners(Solver::MonitorEvent::kLocalOptimum)) {
- if (monitor->LocalOptimum()) {
- at_local_optimum = true;
+ if (monitor->AtLocalOptimum()) {
+ continue_at_local_optimum = true;
}
}
- return at_local_optimum;
+ return continue_at_local_optimum;
}
bool Search::AcceptDelta(Assignment* delta, Assignment* deltadelta) {
@@ -1375,7 +1375,9 @@ void Search::Accept(ModelVisitor* const visitor) const {
#undef CALL_EVENT_LISTENERS
-bool LocalOptimumReached(Search* search) { return search->LocalOptimum(); }
+bool ContinueAtLocalOptimum(Search* search) {
+ return search->ContinueAtLocalOptimum();
+}
bool AcceptDelta(Search* search, Assignment* delta, Assignment* deltadelta) {
return search->AcceptDelta(delta, deltadelta);
@@ -2894,7 +2896,7 @@ void SearchMonitor::EndInitialPropagation() {}
bool SearchMonitor::AcceptSolution() { return true; }
bool SearchMonitor::AtSolution() { return false; }
void SearchMonitor::NoMoreSolutions() {}
-bool SearchMonitor::LocalOptimum() { return false; }
+bool SearchMonitor::AtLocalOptimum() { return false; }
bool SearchMonitor::AcceptDelta([[maybe_unused]] Assignment* delta,
[[maybe_unused]] Assignment* deltadelta) {
return true;
@@ -3252,6 +3254,10 @@ std::string Solver::SearchContext(const Search* search) const {
return search->search_context();
}
+bool Solver::AcceptSolution(Search* search) const {
+ return search->AcceptSolution();
+}
+
Assignment* Solver::GetOrCreateLocalSearchState() {
if (local_search_state_ == nullptr) {
    local_search_state_ = std::make_unique<Assignment>(this);
diff --git a/ortools/constraint_solver/constraint_solver.h b/ortools/constraint_solver/constraint_solver.h
index 523e82fe969..3040a32089b 100644
--- a/ortools/constraint_solver/constraint_solver.h
+++ b/ortools/constraint_solver/constraint_solver.h
@@ -3145,6 +3145,7 @@ class Solver {
void SetSearchContext(Search* search, absl::string_view search_context);
std::string SearchContext() const;
std::string SearchContext(const Search* search) const;
+ bool AcceptSolution(Search* search) const;
/// Returns (or creates) an assignment representing the state of local search.
// TODO(user): Investigate if this should be moved to Search.
Assignment* GetOrCreateLocalSearchState();
@@ -3975,9 +3976,9 @@ class SearchMonitor : public BaseObject {
/// When the search tree is finished.
virtual void NoMoreSolutions();
- /// When a local optimum is reached. If 'true' is returned, the last solution
- /// is discarded and the search proceeds with the next one.
- virtual bool LocalOptimum();
+ /// Called when a local optimum is reached. If 'true' is returned, the last
+ /// solution is discarded and the search proceeds with the next one.
+ virtual bool AtLocalOptimum();
///
virtual bool AcceptDelta(Assignment* delta, Assignment* deltadelta);
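As a point of reference, here is a minimal sketch (not part of this patch) of a monitor built against the renamed hook declared above; the class and member names are illustrative, and the only API it assumes is the `SearchMonitor` interface shown in this hunk.

```cpp
#include "ortools/constraint_solver/constraint_solver.h"

namespace operations_research {

// Lets the local search escape a bounded number of local optima: returning
// true from AtLocalOptimum() discards the last solution and makes the search
// proceed with the next one.
class BoundedRestartMonitor : public SearchMonitor {
 public:
  BoundedRestartMonitor(Solver* solver, int max_restarts)
      : SearchMonitor(solver), max_restarts_(max_restarts) {}

  bool AtLocalOptimum() override { return restarts_++ < max_restarts_; }

 private:
  const int max_restarts_;
  int restarts_ = 0;
};

}  // namespace operations_research
```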
diff --git a/ortools/constraint_solver/csharp/CMakeLists.txt b/ortools/constraint_solver/csharp/CMakeLists.txt
index f0b1d4067ec..c852e752cb1 100644
--- a/ortools/constraint_solver/csharp/CMakeLists.txt
+++ b/ortools/constraint_solver/csharp/CMakeLists.txt
@@ -11,18 +11,18 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set_property(SOURCE routing.i PROPERTY CPLUSPLUS ON)
-set_property(SOURCE routing.i PROPERTY SWIG_MODULE_NAME operations_research_constraint_solver)
-set_property(SOURCE routing.i PROPERTY COMPILE_DEFINITIONS
+set_property(SOURCE constraint_solver.i PROPERTY CPLUSPLUS ON)
+set_property(SOURCE constraint_solver.i PROPERTY SWIG_MODULE_NAME ConstraintSolverGlobals)
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_DEFINITIONS
${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=)
-set_property(SOURCE routing.i PROPERTY COMPILE_OPTIONS
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_OPTIONS
-namespace ${DOTNET_PROJECT}.ConstraintSolver
-dllimport google-ortools-native)
swig_add_library(dotnet_constraint_solver
TYPE OBJECT
LANGUAGE csharp
OUTPUT_DIR ${DOTNET_PROJECT_DIR}/ortools/constraint_solver
- SOURCES routing.i)
+ SOURCES constraint_solver.i)
#target_include_directories(dotnet_constraint_solver PRIVATE ${DOTNET_INCLUDE_DIRS})
set_target_properties(dotnet_constraint_solver PROPERTIES
diff --git a/ortools/constraint_solver/csharp/ConstraintSolverTests.cs b/ortools/constraint_solver/csharp/ConstraintSolverTests.cs
index f4d9c47ffed..d471e1e8e1d 100644
--- a/ortools/constraint_solver/csharp/ConstraintSolverTests.cs
+++ b/ortools/constraint_solver/csharp/ConstraintSolverTests.cs
@@ -14,7 +14,7 @@
using System;
using Xunit;
using Google.OrTools.ConstraintSolver;
-using static Google.OrTools.ConstraintSolver.operations_research_constraint_solver;
+using static Google.OrTools.ConstraintSolver.ConstraintSolverGlobals;
namespace Google.OrTools.Tests
{
diff --git a/ortools/constraint_solver/csharp/constraint_solver.i b/ortools/constraint_solver/csharp/constraint_solver.i
index e7d9b53671b..4bdac92a98b 100644
--- a/ortools/constraint_solver/csharp/constraint_solver.i
+++ b/ortools/constraint_solver/csharp/constraint_solver.i
@@ -34,7 +34,7 @@ class ConstraintSolverParameters;
class RegularLimitParameters;
} // namespace operations_research
-%module(directors="1") operations_research;
+%module(directors="1") ConstraintSolverGlobals;
#pragma SWIG nowarn=473
%{
@@ -947,13 +947,13 @@ PROTO_INPUT(operations_research::CpModel,
PROTO2_RETURN(operations_research::CpModel,
Google.OrTools.ConstraintSolver.CpModel)
-// Add needed import to operations_research_constraint_solver.cs
+// Add needed import to ConstraintSolverGlobals.cs
%pragma(csharp) moduleimports=%{
%}
namespace operations_research {
// Globals
-// IMPORTANT(user): Global will be placed in operations_research_constraint_solver.cs
+// IMPORTANT(user): Global will be placed in ConstraintSolverGlobals.cs
// Ignored:
%ignore FillValues;
} // namespace operations_research
diff --git a/ortools/constraint_solver/docs/CP.md b/ortools/constraint_solver/docs/CP.md
index 2865706a0fc..9909a28221d 100644
--- a/ortools/constraint_solver/docs/CP.md
+++ b/ortools/constraint_solver/docs/CP.md
@@ -12,12 +12,13 @@ Java and .Net. Each language have different requirements for the code samples.
### C++ code samples
```cpp
+// Snippet from ortools/constraint_solver/samples/simple_cp_program.cc
#include
#include
+#include "ortools/base/init_google.h"
#include "absl/base/log_severity.h"
#include "absl/log/globals.h"
-#include "ortools/base/init_google.h"
#include "ortools/constraint_solver/constraint_solver.h"
namespace operations_research {
@@ -73,56 +74,57 @@ int main(int argc, char* argv[]) {
### Python code samples
```python
-#!/usr/bin/env python3
+# Snippet from ortools/constraint_solver/samples/simple_cp_program.py
"""Simple Constraint optimization example."""
from ortools.constraint_solver import pywrapcp
def main():
- """Entry point of the program."""
- # Instantiate the solver.
- solver = pywrapcp.Solver("CPSimple")
-
- # Create the variables.
- num_vals = 3
- x = solver.IntVar(0, num_vals - 1, "x")
- y = solver.IntVar(0, num_vals - 1, "y")
- z = solver.IntVar(0, num_vals - 1, "z")
-
- # Constraint 0: x != y.
- solver.Add(x != y)
- print("Number of constraints: ", solver.Constraints())
-
- # Solve the problem.
- decision_builder = solver.Phase(
- [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
- )
-
- # Print solution on console.
- count = 0
- solver.NewSearch(decision_builder)
- while solver.NextSolution():
- count += 1
- solution = f"Solution {count}:\n"
- for var in [x, y, z]:
- solution += f" {var.Name()} = {var.Value()}"
- print(solution)
- solver.EndSearch()
- print(f"Number of solutions found: {count}")
-
- print("Advanced usage:")
- print(f"Problem solved in {solver.WallTime()}ms")
- print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes")
+ """Entry point of the program."""
+ # Instantiate the solver.
+ solver = pywrapcp.Solver("CPSimple")
+
+ # Create the variables.
+ num_vals = 3
+ x = solver.IntVar(0, num_vals - 1, "x")
+ y = solver.IntVar(0, num_vals - 1, "y")
+ z = solver.IntVar(0, num_vals - 1, "z")
+
+ # Constraint 0: x != y.
+ solver.Add(x != y)
+ print("Number of constraints: ", solver.Constraints())
+
+ # Solve the problem.
+ decision_builder = solver.Phase(
+ [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+
+ # Print solution on console.
+ count = 0
+ solver.NewSearch(decision_builder)
+ while solver.NextSolution():
+ count += 1
+ solution = f"Solution {count}:\n"
+ for var in [x, y, z]:
+ solution += f" {var.Name()} = {var.Value()}"
+ print(solution)
+ solver.EndSearch()
+ print(f"Number of solutions found: {count}")
+
+ print("Advanced usage:")
+ print(f"Problem solved in {solver.WallTime()}ms")
+ print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes")
if __name__ == "__main__":
- main()
+ main()
```
### Java code samples
```java
+// Snippet from ortools/constraint_solver/samples/SimpleCpProgram.java
package com.google.ortools.constraintsolver.samples;
import com.google.ortools.Loader;
import com.google.ortools.constraintsolver.DecisionBuilder;
@@ -148,74 +150,78 @@ public class SimpleCpProgram {
final IntVar z = solver.makeIntVar(0, numVals - 1, "z");
// Constraint 0: x != y..
- solver.addConstraint(solver.makeAllDifferent(new IntVar[] {x, y}));
+ solver.addConstraint(solver.makeAllDifferent(new IntVar[]{x, y}));
logger.info("Number of constraints: " + solver.constraints());
// Solve the problem.
final DecisionBuilder db = solver.makePhase(
- new IntVar[] {x, y, z}, Solver.CHOOSE_FIRST_UNBOUND, Solver.ASSIGN_MIN_VALUE);
+ new IntVar[]{x, y, z},
+ Solver.CHOOSE_FIRST_UNBOUND,
+ Solver.ASSIGN_MIN_VALUE);
// Print solution on console.
int count = 0;
solver.newSearch(db);
while (solver.nextSolution()) {
++count;
- logger.info(
- String.format("Solution: %d\n x=%d y=%d z=%d", count, x.value(), y.value(), z.value()));
+ logger.info(String.format("Solution: %d\n x=%d y=%d z=%d"
+ , count
+ , x.value()
+ , y.value()
+ , z.value()));
}
solver.endSearch();
logger.info("Number of solutions found: " + solver.solutions());
- logger.info(String.format("Advanced usage:\nProblem solved in %d ms\nMemory usage: %d bytes",
- solver.wallTime(), Solver.memoryUsage()));
+ logger.info(String.format(
+ "Advanced usage:\nProblem solved in %d ms\nMemory usage: %d bytes"
+ , solver.wallTime(), Solver.memoryUsage()));
}
}
```
### .Net code samples
-```cs
+```csharp
+// Snippet from ortools/constraint_solver/samples/SimpleCpProgram.cs
using System;
using Google.OrTools.ConstraintSolver;
///
/// This is a simple CP program.
///
-public class SimpleCpProgram
-{
- public static void Main(String[] args)
- {
- // Instantiate the solver.
- Solver solver = new Solver("CpSimple");
-
- // Create the variables.
- const long numVals = 3;
- IntVar x = solver.MakeIntVar(0, numVals - 1, "x");
- IntVar y = solver.MakeIntVar(0, numVals - 1, "y");
- IntVar z = solver.MakeIntVar(0, numVals - 1, "z");
-
- // Constraint 0: x != y..
- solver.Add(solver.MakeAllDifferent(new IntVar[] { x, y }));
- Console.WriteLine($"Number of constraints: {solver.Constraints()}");
-
- // Solve the problem.
- DecisionBuilder db =
- solver.MakePhase(new IntVar[] { x, y, z }, Solver.CHOOSE_FIRST_UNBOUND, Solver.ASSIGN_MIN_VALUE);
-
- // Print solution on console.
- int count = 0;
- solver.NewSearch(db);
- while (solver.NextSolution())
- {
- ++count;
- Console.WriteLine($"Solution: {count}\n x={x.Value()} y={y.Value()} z={z.Value()}");
- }
- solver.EndSearch();
- Console.WriteLine($"Number of solutions found: {solver.Solutions()}");
-
- Console.WriteLine("Advanced usage:");
- Console.WriteLine($"Problem solved in {solver.WallTime()}ms");
- Console.WriteLine($"Memory usage: {Solver.MemoryUsage()}bytes");
+public class SimpleCpProgram {
+ public static void Main(String[] args) {
+ // Instantiate the solver.
+ Solver solver = new Solver("CpSimple");
+
+ // Create the variables.
+ const long numVals = 3;
+ IntVar x = solver.MakeIntVar(0, numVals - 1, "x");
+ IntVar y = solver.MakeIntVar(0, numVals - 1, "y");
+ IntVar z = solver.MakeIntVar(0, numVals - 1, "z");
+
+ // Constraint 0: x != y..
+ solver.Add(solver.MakeAllDifferent(new IntVar[] { x, y }));
+ Console.WriteLine($"Number of constraints: {solver.Constraints()}");
+
+ // Solve the problem.
+ DecisionBuilder db = solver.MakePhase(new IntVar[] { x, y, z }, Solver.CHOOSE_FIRST_UNBOUND,
+ Solver.ASSIGN_MIN_VALUE);
+
+ // Print solution on console.
+ int count = 0;
+ solver.NewSearch(db);
+ while (solver.NextSolution()) {
+ ++count;
+ Console.WriteLine($"Solution: {count}\n x={x.Value()} y={y.Value()} z={z.Value()}");
}
+ solver.EndSearch();
+ Console.WriteLine($"Number of solutions found: {solver.Solutions()}");
+
+ Console.WriteLine("Advanced usage:");
+ Console.WriteLine($"Problem solved in {solver.WallTime()}ms");
+ Console.WriteLine($"Memory usage: {Solver.MemoryUsage()}bytes");
+ }
}
```
diff --git a/ortools/constraint_solver/docs/README.md b/ortools/constraint_solver/docs/README.md
index c28d352b506..8ce4e8294bf 100644
--- a/ortools/constraint_solver/docs/README.md
+++ b/ortools/constraint_solver/docs/README.md
@@ -11,7 +11,7 @@ You can find here the documentation for the two following OR-Tools components.
**note:** We **strongly recommend** using the [CP-SAT solver](../../sat)
rather than the original CP solver.
-* [Routing Solver](ROUTING.md)
+* [Routing Solver](../../routing/docs/ROUTING.md)
A specialized library for identifying best vehicle routes given constraints.
diff --git a/ortools/constraint_solver/docs/routing_svg.py b/ortools/constraint_solver/docs/routing_svg.py
deleted file mode 100755
index 54f188a52c9..00000000000
--- a/ortools/constraint_solver/docs/routing_svg.py
+++ /dev/null
@@ -1,1219 +0,0 @@
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Generate SVG for a Routing problem."""
-
-# [START import]
-import argparse
-from ortools.constraint_solver import pywrapcp
-from ortools.constraint_solver import routing_enums_pb2
-# [END import]
-
-
-# [START data_model]
-class DataModel(object): # pylint: disable=too-many-instance-attributes
- """Stores the data for the problem."""
-
- def __init__(self, args):
- # Locations in block units
- locations = [
- (4, 4), # depot
- (2, 0),
- (8, 0), # locations to visit
- (0, 1),
- (1, 1),
- (5, 2),
- (7, 2),
- (3, 3),
- (6, 3),
- (5, 5),
- (8, 5),
- (1, 6),
- (2, 6),
- (3, 7),
- (6, 7),
- (0, 8),
- (7, 8),
- ]
- # Convert locations in meters using a city block dimension of 114m x 80m.
- self._locations = [(l[0] * 114, l[1] * 80) for l in locations]
- self._distance_matrix = [
- [
- 0,
- 548,
- 776,
- 696,
- 582,
- 274,
- 502,
- 194,
- 308,
- 194,
- 536,
- 502,
- 388,
- 354,
- 468,
- 776,
- 662,
- ],
- [
- 548,
- 0,
- 684,
- 308,
- 194,
- 502,
- 730,
- 354,
- 696,
- 742,
- 1084,
- 594,
- 480,
- 674,
- 1016,
- 868,
- 1210,
- ],
- [
- 776,
- 684,
- 0,
- 992,
- 878,
- 502,
- 274,
- 810,
- 468,
- 742,
- 400,
- 1278,
- 1164,
- 1130,
- 788,
- 1552,
- 754,
- ],
- [
- 696,
- 308,
- 992,
- 0,
- 114,
- 650,
- 878,
- 502,
- 844,
- 890,
- 1232,
- 514,
- 628,
- 822,
- 1164,
- 560,
- 1358,
- ],
- [
- 582,
- 194,
- 878,
- 114,
- 0,
- 536,
- 764,
- 388,
- 730,
- 776,
- 1118,
- 400,
- 514,
- 708,
- 1050,
- 674,
- 1244,
- ],
- [
- 274,
- 502,
- 502,
- 650,
- 536,
- 0,
- 228,
- 308,
- 194,
- 240,
- 582,
- 776,
- 662,
- 628,
- 514,
- 1050,
- 708,
- ],
- [
- 502,
- 730,
- 274,
- 878,
- 764,
- 228,
- 0,
- 536,
- 194,
- 468,
- 354,
- 1004,
- 890,
- 856,
- 514,
- 1278,
- 480,
- ],
- [
- 194,
- 354,
- 810,
- 502,
- 388,
- 308,
- 536,
- 0,
- 342,
- 388,
- 730,
- 468,
- 354,
- 320,
- 662,
- 742,
- 856,
- ],
- [
- 308,
- 696,
- 468,
- 844,
- 730,
- 194,
- 194,
- 342,
- 0,
- 274,
- 388,
- 810,
- 696,
- 662,
- 320,
- 1084,
- 514,
- ],
- [
- 194,
- 742,
- 742,
- 890,
- 776,
- 240,
- 468,
- 388,
- 274,
- 0,
- 342,
- 536,
- 422,
- 388,
- 274,
- 810,
- 468,
- ],
- [
- 536,
- 1084,
- 400,
- 1232,
- 1118,
- 582,
- 354,
- 730,
- 388,
- 342,
- 0,
- 878,
- 764,
- 730,
- 388,
- 1152,
- 354,
- ],
- [
- 502,
- 594,
- 1278,
- 514,
- 400,
- 776,
- 1004,
- 468,
- 810,
- 536,
- 878,
- 0,
- 114,
- 308,
- 650,
- 274,
- 844,
- ],
- [
- 388,
- 480,
- 1164,
- 628,
- 514,
- 662,
- 890,
- 354,
- 696,
- 422,
- 764,
- 114,
- 0,
- 194,
- 536,
- 388,
- 730,
- ],
- [
- 354,
- 674,
- 1130,
- 822,
- 708,
- 628,
- 856,
- 320,
- 662,
- 388,
- 730,
- 308,
- 194,
- 0,
- 342,
- 422,
- 536,
- ],
- [
- 468,
- 1016,
- 788,
- 1164,
- 1050,
- 514,
- 514,
- 662,
- 320,
- 274,
- 388,
- 650,
- 536,
- 342,
- 0,
- 764,
- 194,
- ],
- [
- 776,
- 868,
- 1552,
- 560,
- 674,
- 1050,
- 1278,
- 742,
- 1084,
- 810,
- 1152,
- 274,
- 388,
- 422,
- 764,
- 0,
- 798,
- ],
- [
- 662,
- 1210,
- 754,
- 1358,
- 1244,
- 708,
- 480,
- 856,
- 514,
- 468,
- 354,
- 844,
- 730,
- 536,
- 194,
- 798,
- 0,
- ],
- ]
- self._time_matrix = [
- [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],
- [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],
- [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],
- [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],
- [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],
- [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],
- [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],
- [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],
- [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],
- [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],
- [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],
- [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],
- [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],
- [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],
- [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],
- [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],
- [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],
- ]
- self._time_windows = [
- (0, 5), # depot
- (7, 12), # 1
- (10, 15), # 2
- (5, 14), # 3
- (5, 13), # 4
- (0, 5), # 5
- (5, 10), # 6
- (0, 10), # 7
- (5, 10), # 8
- (0, 5), # 9
- (10, 16), # 10
- (10, 15), # 11
- (0, 5), # 12
- (5, 10), # 13
- (7, 12), # 14
- (10, 15), # 15
- (5, 15), # 16
- ]
- if args["drop_nodes"]:
- self._demands = [0, 1, 1, 3, 6, 3, 6, 8, 8, 1, 2, 1, 2, 6, 6, 8, 8]
- else:
- self._demands = [0, 1, 1, 2, 4, 2, 4, 8, 8, 1, 2, 1, 2, 4, 4, 8, 8]
- self._pickups_deliveries = [
- [1, 6],
- [2, 10],
- [4, 3],
- [5, 9],
- [7, 8],
- [15, 11],
- [13, 12],
- [16, 14],
- ]
-
- if args["tsp"]:
- self._num_vehicles = 1
- else:
- self._num_vehicles = 4
- self._vehicle_capacities = [15, 15, 15, 15]
-
- if args["resources"]:
- self._vehicle_load_time = 5
- self._vehicle_unload_time = 5
-
- self._depot = 0
- self._depot_capacity = 2
- self._starts = [1, 2, 15, 16]
- self._ends = [0, 0, 0, 0]
-
- @property
- def locations(self):
- """Gets the locations."""
- return self._locations
-
- @property
- def distance_matrix(self):
- """Gets the distance matrix."""
- return self._distance_matrix
-
- @property
- def time_matrix(self):
- """Gets the time matrix."""
- return self._time_matrix
-
- @property
- def time_windows(self):
- """Gets the time windows."""
- return self._time_windows
-
- @property
- def demands(self):
- """Gets the locations demands."""
- return self._demands
-
- @property
- def pickups_deliveries(self):
- """Gets the pickups deliveries."""
- return self._pickups_deliveries
-
- @property
- def num_vehicles(self):
- """Gets the number of vehicles."""
- return self._num_vehicles
-
- @property
- def vehicle_capacities(self):
- """Gets the capacity of each vehicles."""
- return self._vehicle_capacities
-
- @property
- def vehicle_load_time(self):
- """Gets the load time of each vehicles."""
- return self._vehicle_load_time
-
- @property
- def vehicle_unload_time(self):
- """Gets the unload time of each vehicles."""
- return self._vehicle_unload_time
-
- @property
- def depot_capacity(self):
- """Gets the depot capacity."""
- return self._depot_capacity
-
- @property
- def depot(self):
- """Gets the depot node index."""
- return self._depot
-
- @property
- def starts(self):
- """Gets the start nodes indices."""
- return self._starts
-
- @property
- def ends(self):
- """Gets the end nodes indices."""
- return self._ends
-
- # [END data_model]
-
-
-###########
-# Printer #
-###########
-class GoogleColorPalette(object):
- """Google color codes palette."""
-
- def __init__(self):
- """Initialize Google ColorPalette."""
- self._colors = [
- ("blue", r"#4285F4"),
- ("red", r"#EA4335"),
- ("yellow", r"#FBBC05"),
- ("green", r"#34A853"),
- ("black", r"#101010"),
- ("white", r"#FFFFFF"),
- ]
-
- def __getitem__(self, key):
- """Gets color name from idx."""
- return self._colors[key][0]
-
- def __len__(self):
- """Gets the number of colors."""
- return len(self._colors)
-
- @property
- def colors(self):
- """Gets the colors list."""
- return self._colors
-
- def name(self, idx):
- """Return color name from idx."""
- return self._colors[idx][0]
-
- def value(self, idx):
- """Return color value from idx."""
- return self._colors[idx][1]
-
- def value_from_name(self, name):
- """Return color value from name."""
- return dict(self._colors)[name]
-
-
-class SVG(object):
- """SVG draw primitives."""
-
- @staticmethod
- def header(size, margin):
- """Writes header."""
- print(
- r''.format(
- width=size[0] + 2 * margin, height=size[1] + 2 * margin, margin=margin
- )
- )
-
- @staticmethod
- def definitions(colors):
- """Writes definitions."""
- print(
- r""
- )
- print(r"")
- for color in colors:
- print(
- r' '.format(colorname=color[0])
- )
- print(
- r' '.format(
- color=color[1]
- )
- )
- print(r" ")
- print(r" ")
-
- @staticmethod
- def footer():
- """Writes svg footer."""
- print(r" ")
-
- @staticmethod
- def draw_line(position_1, position_2, size, fg_color):
- """Draws a line."""
- line_style = (r'style="stroke-width:{sz};stroke:{fg};fill:none"').format(
- sz=size, fg=fg_color
- )
- print(
- r' '.format(
- x1=position_1[0],
- y1=position_1[1],
- x2=position_2[0],
- y2=position_2[1],
- style=line_style,
- )
- )
-
- @staticmethod
- def draw_polyline(position_1, position_2, size, fg_color, colorname):
- """Draws a line with arrow maker in the middle."""
- polyline_style = (
- r'style="stroke-width:{sz};stroke:{fg};fill:none;'
- 'marker-mid:url(#arrow_{colorname})"'
- ).format(sz=size, fg=fg_color, colorname=colorname)
- print(
- r' '.format(
- x1=position_1[0],
- y1=position_1[1],
- x2=(position_1[0] + position_2[0]) / 2,
- y2=(position_1[1] + position_2[1]) / 2,
- x3=position_2[0],
- y3=position_2[1],
- style=polyline_style,
- )
- )
-
- @staticmethod
- def draw_circle(position, radius, size, fg_color, bg_color="white"):
- """Print a circle."""
- circle_style = (r'style="stroke-width:{sz};stroke:{fg};fill:{bg}"').format(
- sz=size, fg=fg_color, bg=bg_color
- )
- print(
- r' '.format(
- cx=position[0], cy=position[1], r=radius, style=circle_style
- )
- )
-
- @staticmethod
- def draw_text(text, position, size, fg_color="none", bg_color="black"):
- """Print a middle centred text."""
- text_style = (
- r'style="text-anchor:middle;font-weight:bold;'
- 'font-size:{sz};stroke:{fg};fill:{bg}"'
- ).format(sz=size, fg=fg_color, bg=bg_color)
- print(
- r'{txt} '.format(
- x=position[0], y=position[1], dy=size / 3, style=text_style, txt=text
- )
- )
-
-
-class SVGPrinter(object): # pylint: disable=too-many-instance-attributes
- """Generate Problem as svg file to stdout."""
-
- # pylint: disable=too-many-arguments
- def __init__(self, args, data, manager=None, routing=None, assignment=None):
- """Initializes the printer."""
- self._args = args
- self._data = data
- self._manager = manager
- self._routing = routing
- self._assignment = assignment
- # Design variables
- self._color_palette = GoogleColorPalette()
- self._svg = SVG()
- # City block size 114mx80m
- self._radius = min(114, 80) / 3
- self._stroke_width = self._radius / 4
-
- @property
- def data(self):
- """Gets the Data Model."""
- return self._data
-
- @property
- def manager(self):
- """Gets the RoutingIndexManager."""
- return self._manager
-
- @property
- def routing(self):
- """Gets the Routing solver."""
- return self._routing
-
- @property
- def assignment(self):
- """Gets the assignment."""
- return self._assignment
-
- @property
- def color_palette(self):
- """Gets the color palette."""
- return self._color_palette
-
- @property
- def svg(self):
- """Gets the svg."""
- return self._svg
-
- def draw_grid(self):
- """Draws the city grid."""
- print(r"")
- color = "#969696"
- # Horizontal streets
- for i in range(9):
- p_1 = [0, i * 80]
- p_2 = [8 * 114, p_1[1]]
- self._svg.draw_line(p_1, p_2, 2, color)
- # Vertical streets
- for i in range(9):
- p_1 = [i * 114, 0]
- p_2 = [p_1[0], 8 * 80]
- self._svg.draw_line(p_1, p_2, 2, color)
-
- def draw_depot(self):
- """Draws the depot."""
- print(r"")
- color = self._color_palette.value_from_name("black")
- loc = self._data.locations[self._data.depot]
- self._svg.draw_circle(loc, self._radius, self._stroke_width, color, "white")
- self._svg.draw_text(self._data.depot, loc, self._radius, "none", color)
-
- def draw_depots(self):
- """Draws the depot."""
- print(r"")
- # print starts
- for vehicle_idx, start in enumerate(self._data.starts):
- del vehicle_idx
- color = self._color_palette.value_from_name("black")
- # color = self._color_palette.value(vehicle_idx)
- loc = self._data.locations[start]
- self._svg.draw_circle(loc, self._radius, self._stroke_width, color, "white")
- self._svg.draw_text(start, loc, self._radius, "none", color)
- # print end
- color = self._color_palette.value_from_name("black")
- loc = self._data.locations[0]
- self._svg.draw_circle(loc, self._radius, self._stroke_width, color, "white")
- self._svg.draw_text(0, loc, self._radius, "none", color)
-
- def draw_locations(self):
- """Draws all the locations but the depot."""
- print(r"")
- color = self._color_palette.value_from_name("blue")
- if not self._args["starts_ends"]:
- for idx, loc in enumerate(self._data.locations):
- if idx == self._data.depot:
- continue
- self._svg.draw_circle(
- loc, self._radius, self._stroke_width, color, "white"
- )
- self._svg.draw_text(idx, loc, self._radius, "none", color)
- else:
- for idx, loc in enumerate(self._data.locations):
- if idx in self._data.starts + self._data.ends:
- continue
- self._svg.draw_circle(
- loc, self._radius, self._stroke_width, color, "white"
- )
- self._svg.draw_text(idx, loc, self._radius, "none", color)
-
- def draw_demands(self):
- """Draws all the demands."""
- print(r"")
- for idx, loc in enumerate(self._data.locations):
- if idx == self._data.depot:
- continue
- demand = self._data.demands[idx]
- position = [
- x + y for x, y in zip(loc, [self._radius * 1.2, self._radius * 1.1])
- ]
- color = self._color_palette.value_from_name("red")
- # color = self._color_palette.value(int(math.log(demand, 2)))
- self._svg.draw_text(demand, position, self._radius, "none", color)
-
- def draw_pickups_deliveries(self):
- """Draws all pickups deliveries."""
- print(r"")
- colorname = "red"
- color = self._color_palette.value_from_name(colorname)
- for pickup_delivery in self._data.pickups_deliveries:
- self._svg.draw_polyline(
- self._data.locations[pickup_delivery[0]],
- self._data.locations[pickup_delivery[1]],
- self._stroke_width,
- color,
- colorname,
- )
-
- def draw_time_windows(self):
- """Draws all the time windows."""
- print(r"")
- for idx, loc in enumerate(self._data.locations):
- if idx == self._data.depot:
- continue
- time_window = self._data.time_windows[idx]
- position = [
- x + y for x, y in zip(loc, [self._radius * 0, -self._radius * 1.6])
- ]
- color = self._color_palette.value_from_name("red")
- self._svg.draw_text(
- "[{t1},{t2}]".format(t1=time_window[0], t2=time_window[1]),
- position,
- self._radius * 0.75,
- "white",
- color,
- )
-
- ##############
- ## ROUTES ##
- ##############
-
- def draw_drop_nodes(self):
- """Draws the dropped nodes."""
- print(r"")
- if self._assignment is None:
- print("")
- # Display dropped nodes.
- dropped_nodes = []
- for node in range(self._routing.Size()):
- if self._routing.IsStart(node) or self._routing.IsEnd(node):
- continue
- if self._assignment.Value(self._routing.NextVar(node)) == node:
- dropped_nodes.append(self._manager.IndexToNode(node))
- color = self._color_palette.value_from_name("black")
- for node_idx in dropped_nodes:
- loc = self._data.locations[node_idx]
- self._svg.draw_circle(loc, self._radius, self._stroke_width, color, "white")
- self._svg.draw_text(node_idx, loc, self._radius, "none", color)
-
- def routes(self):
- """Creates the route list from the assignment."""
- if self._assignment is None:
- print("")
- return []
- routes = []
- for vehicle_id in range(self._data.num_vehicles):
- index = self._routing.Start(vehicle_id)
- route = []
- while not self._routing.IsEnd(index):
- node_index = self._manager.IndexToNode(index)
- route.append(node_index)
- index = self._assignment.Value(self._routing.NextVar(index))
- node_index = self._manager.IndexToNode(index)
- route.append(node_index)
- routes.append(route)
- return routes
-
- def draw_route(self, route, color, colorname):
- """Draws a Route."""
- # First print route
- previous_loc_idx = None
- for loc_idx in route:
- if previous_loc_idx is not None and previous_loc_idx != loc_idx:
- self._svg.draw_polyline(
- self._data.locations[previous_loc_idx],
- self._data.locations[loc_idx],
- self._stroke_width,
- color,
- colorname,
- )
- previous_loc_idx = loc_idx
- # Then print location along the route
- for loc_idx in route:
- if loc_idx != self._data.depot:
- loc = self._data.locations[loc_idx]
- self._svg.draw_circle(
- loc, self._radius, self._stroke_width, color, "white"
- )
- self._svg.draw_text(loc_idx, loc, self._radius, "none", color)
-
- def draw_routes(self):
- """Draws the routes."""
- print(r"")
- for route_idx, route in enumerate(self.routes()):
- print(r"".format(idx=route_idx))
- color = self._color_palette.value(route_idx)
- colorname = self._color_palette.name(route_idx)
- self.draw_route(route, color, colorname)
-
- def tw_routes(self):
- """Creates the route time window list from the assignment."""
- if self._assignment is None:
- print("")
- return []
- time_dimension = self._routing.GetDimensionOrDie("Time")
- loc_routes = []
- tw_routes = []
- for vehicle_id in range(self._data.num_vehicles):
- index = self._routing.Start(vehicle_id)
- # index = self._assignment.Value(self._routing.NextVar(index))
- loc_route = []
- tw_route = []
- while True:
- node_index = self._manager.IndexToNode(index)
- loc_route.append(node_index)
- time_var = time_dimension.CumulVar(index)
- t_min = self._assignment.Min(time_var)
- t_max = self._assignment.Max(time_var)
- tw_route.append((t_min, t_max))
- if self._routing.IsEnd(index):
- break
- index = self._assignment.Value(self._routing.NextVar(index))
- loc_routes.append(loc_route)
- tw_routes.append(tw_route)
- return zip(loc_routes, tw_routes)
-
- def draw_tw_route(self, route_idx, locations, tw_route, color):
- """Draws the time windows for a Route."""
- is_start = -1
- for loc_idx, time_window in zip(locations, tw_route):
- loc = self._data.locations[loc_idx]
- if loc_idx == 0: # special case for depot
- position = [
- x + y
- for x, y in zip(
- loc, [self._radius * is_start, self._radius * (1.8 + route_idx)]
- )
- ]
- is_start = 1
- else:
- position = [
- x + y for x, y in zip(loc, [self._radius * 0, self._radius * 1.8])
- ]
- self._svg.draw_text(
- "[{t_min}]".format(t_min=time_window[0]),
- position,
- self._radius * 0.75,
- "white",
- color,
- )
-
- def draw_tw_routes(self):
- """Draws the time window routes."""
- print(r"")
- for route_idx, loc_tw in enumerate(self.tw_routes()):
- print(r"".format(route_idx))
- color = self._color_palette.value(route_idx)
- self.draw_tw_route(route_idx, loc_tw[0], loc_tw[1], color)
-
- def print_to_console(self):
- """Prints a full svg document on stdout."""
- margin = self._radius * 2 + 2
- size = [8 * 114, 8 * 80]
- self._svg.header(size, margin)
- self._svg.definitions(self._color_palette.colors)
- self.draw_grid()
- if not self._args["solution"]:
- if self._args["pickup_delivery"]:
- self.draw_pickups_deliveries()
- self.draw_locations()
- else:
- self.draw_routes()
- self.draw_drop_nodes()
- if self._args["starts_ends"]:
- self.draw_depots()
- else:
- self.draw_depot()
- if self._args["capacity"]:
- self.draw_demands()
- if self._args["drop_nodes"]:
- self.draw_demands()
- if self._args["time_windows"] or self._args["resources"]:
- self.draw_time_windows()
- if (self._args["time_windows"] or self._args["resources"]) and self._args[
- "solution"
- ]:
- self.draw_tw_routes()
- self._svg.footer()
-
-
-########
-# Main #
-########
-def main(): # pylint: disable=too-many-locals,too-many-branches
- """Entry point of the program."""
- parser = argparse.ArgumentParser(description="Output VRP as svg image.")
- parser.add_argument("-tsp", "--tsp", action="store_true", help="use 1 vehicle")
- parser.add_argument("-vrp", "--vrp", action="store_true", help="use 4 vehicle")
- parser.add_argument(
- "-gs", "--global-span", action="store_true", help="use global span constraints"
- )
- parser.add_argument(
- "-c", "--capacity", action="store_true", help="use capacity constraints"
- )
- parser.add_argument(
- "-r", "--resources", action="store_true", help="use resources constraints"
- )
- parser.add_argument(
- "-dn",
- "--drop-nodes",
- action="store_true",
- help="allow drop nodes (disjuntion constraints)",
- )
- parser.add_argument(
- "-tw", "--time-windows", action="store_true", help="use time-window constraints"
- )
- parser.add_argument(
- "-se", "--starts-ends", action="store_true", help="use multiple starts & ends"
- )
- parser.add_argument(
- "-pd",
- "--pickup-delivery",
- action="store_true",
- help="use pickup & delivery constraints",
- )
- parser.add_argument(
- "-fifo", "--fifo", action="store_true", help="use pickup & delivery FIFO Policy"
- )
- parser.add_argument(
- "-lifo", "--lifo", action="store_true", help="use pickup & delivery LIFO Policy"
- )
- parser.add_argument("-s", "--solution", action="store_true", help="print solution")
- args = vars(parser.parse_args())
-
- # Instantiate the data problem.
- # [START data]
- data = DataModel(args)
- # [END data]
-
- if not args["solution"]:
- # Print svg on cout
- printer = SVGPrinter(args, data)
- printer.print_to_console()
- return 0
-
- # Create the routing index manager.
- # [START index_manager]
- if args["starts_ends"]:
- manager = pywrapcp.RoutingIndexManager(
- len(data.locations), data.num_vehicles, data.starts, data.ends
- )
- else:
- manager = pywrapcp.RoutingIndexManager(
- len(data.locations), data.num_vehicles, data.depot
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Register distance callback
- def distance_callback(from_index, to_index):
- """Returns the manhattan distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data.distance_matrix[from_node][to_node]
-
- distance_callback_index = routing.RegisterTransitCallback(distance_callback)
-
- # Register time callback
- def time_callback(from_index, to_index):
- """Returns the manhattan distance travel time between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data.time_matrix[from_node][to_node]
-
- time_callback_index = routing.RegisterTransitCallback(time_callback)
-
- # Register demands callback
- def demand_callback(from_index):
- """Returns the demand of the node."""
- # Convert from routing variable Index to demands NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data.demands[from_node]
-
- demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
-
- if args["time_windows"] or args["resources"]:
- routing.SetArcCostEvaluatorOfAllVehicles(time_callback_index)
- else:
- routing.SetArcCostEvaluatorOfAllVehicles(distance_callback_index)
-
- if args["global_span"] or args["pickup_delivery"]:
- dimension_name = "Distance"
- routing.AddDimension(distance_callback_index, 0, 3000, True, dimension_name)
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
-
- if args["capacity"] or args["drop_nodes"]:
- routing.AddDimensionWithVehicleCapacity(
- demand_callback_index, 0, data.vehicle_capacities, True, "Capacity"
- )
-
- if args["drop_nodes"]:
- # Allow to drop nodes.
- penalty = 1000
- for node in range(1, len(data.locations)):
- routing.AddDisjunction([manager.NodeToIndex(node)], penalty)
-
- if args["pickup_delivery"]:
- dimension_name = "Distance"
- routing.AddDimension(distance_callback_index, 0, 3000, True, dimension_name)
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- for request in data.pickups_deliveries:
- pickup_index = manager.NodeToIndex(request[0])
- delivery_index = manager.NodeToIndex(request[1])
- routing.AddPickupAndDelivery(pickup_index, delivery_index)
- routing.solver().Add(
- routing.VehicleVar(pickup_index) == routing.VehicleVar(delivery_index)
- )
- routing.solver().Add(
- distance_dimension.CumulVar(pickup_index)
- <= distance_dimension.CumulVar(delivery_index)
- )
- if args["fifo"]:
- routing.SetPickupAndDeliveryPolicyOfAllVehicles(
- pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_FIFO
- )
- if args["lifo"]:
- routing.SetPickupAndDeliveryPolicyOfAllVehicles(
- pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_LIFO
- )
-
- if args["starts_ends"]:
- dimension_name = "Distance"
- routing.AddDimension(distance_callback_index, 0, 2000, True, dimension_name)
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
-
- time = "Time"
- if args["time_windows"] or args["resources"]:
- routing.AddDimension(time_callback_index, 30, 30, False, time)
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot and 'copy' the
- # slack var in the solution object (aka Assignment) to print it.
- for location_idx, time_window in enumerate(data.time_windows):
- if location_idx == 0:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- routing.AddToAssignment(time_dimension.SlackVar(index))
- # Add time window constraints for each vehicle start node and 'copy' the
- # slack var in the solution object (aka Assignment) to print it.
- for vehicle_id in range(data.num_vehicles):
- index = routing.Start(vehicle_id)
- time_window = data.time_windows[0]
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- routing.AddToAssignment(time_dimension.SlackVar(index))
-
- # Instantiate route start and end times to produce feasible times.
- for vehicle_id in range(data.num_vehicles):
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.End(vehicle_id))
- )
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.Start(vehicle_id))
- )
-
- if args["resources"]:
- # Add resource constraints at the depot.
- time_dimension = routing.GetDimensionOrDie(time)
- solver = routing.solver()
- intervals = []
- for i in range(data.num_vehicles):
- # Add loading time at start of routes
- intervals.append(
- solver.FixedDurationIntervalVar(
- time_dimension.CumulVar(routing.Start(i)),
- data.vehicle_load_time,
- "depot_interval",
- )
- )
- # Add unloading time at end of routes.
- intervals.append(
- solver.FixedDurationIntervalVar(
- time_dimension.CumulVar(routing.End(i)),
- data.vehicle_unload_time,
- "depot_interval ",
- )
- )
-
- depot_usage = [1 for i in range(data.num_vehicles * 2)]
- solver.AddConstraint(
- solver.Cumulative(intervals, depot_usage, data.depot_capacity, "depot")
- )
-
- # Setting first solution heuristic (cheapest addition).
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- # pylint: disable=no-member
- if not args["pickup_delivery"]:
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- else:
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION
- )
-
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(2)
-
- # Solve the problem.
- assignment = routing.SolveWithParameters(search_parameters)
- # Print the solution.
- printer = SVGPrinter(args, data, manager, routing, assignment)
- printer.print_to_console()
- return 0
-
-
-if __name__ == "__main__":
- main()
diff --git a/ortools/constraint_solver/java/CMakeLists.txt b/ortools/constraint_solver/java/CMakeLists.txt
index cd894902a16..e1116674378 100644
--- a/ortools/constraint_solver/java/CMakeLists.txt
+++ b/ortools/constraint_solver/java/CMakeLists.txt
@@ -11,17 +11,17 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set_property(SOURCE routing.i PROPERTY CPLUSPLUS ON)
-set_property(SOURCE routing.i PROPERTY SWIG_MODULE_NAME main)
-set_property(SOURCE routing.i PROPERTY COMPILE_DEFINITIONS
+set_property(SOURCE constraint_solver.i PROPERTY CPLUSPLUS ON)
+set_property(SOURCE constraint_solver.i PROPERTY SWIG_MODULE_NAME Globals)
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_DEFINITIONS
${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=)
-set_property(SOURCE routing.i PROPERTY COMPILE_OPTIONS
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_OPTIONS
-package ${JAVA_PACKAGE}.constraintsolver)
swig_add_library(jniconstraint_solver
TYPE OBJECT
LANGUAGE java
OUTPUT_DIR ${JAVA_PROJECT_DIR}/${JAVA_SRC_PATH}/constraintsolver
- SOURCES routing.i)
+ SOURCES constraint_solver.i)
target_include_directories(jniconstraint_solver PRIVATE ${JNI_INCLUDE_DIRS})
set_target_properties(jniconstraint_solver PROPERTIES
diff --git a/ortools/constraint_solver/java/constraint_solver.i b/ortools/constraint_solver/java/constraint_solver.i
index 8c0d82e7ceb..88f8540460d 100644
--- a/ortools/constraint_solver/java/constraint_solver.i
+++ b/ortools/constraint_solver/java/constraint_solver.i
@@ -14,7 +14,7 @@
// TODO(user): Refactor this file to adhere to the SWIG style guide.
// Used for free functions.
-%module(directors="1") operations_research;
+%module(directors="1") Globals;
%include "enumsimple.swg"
%include "exception.i"
@@ -1558,7 +1558,7 @@ CONVERT_VECTOR(operations_research::SymmetryBreaker, SymmetryBreaker);
%rename (toString) *::DebugString;
%rename("%(lowercamelcase)s", %$isvariable) "";
-// Add needed import to mainJNI.java
+// Add needed import to GlobalsJNI.java
%pragma(java) jniclassimports=%{
// Used to wrap std::function
// see https://docs.oracle.com/javase/8/docs/api/java/util/function/Supplier.html
@@ -1622,14 +1622,14 @@ PROTO_INPUT(operations_research::RegularLimitParameters,
PROTO2_RETURN(operations_research::RegularLimitParameters,
com.google.ortools.constraintsolver.RegularLimitParameters)
-// Add needed import to main.java
+// Add needed import to Globals.java
%pragma(java) moduleimports=%{
%}
namespace operations_research {
// Globals
-// IMPORTANT(user): Globals will be placed in main.java
-// i.e. use `import com.[...].constraintsolver.main`
+// IMPORTANT(user): Globals will be placed in Globals.java
+// i.e. use `import com.[...].constraintsolver.Globals`
%ignore FillValues;
%rename (areAllBooleans) AreAllBooleans;
%rename (areAllBound) AreAllBound;
diff --git a/ortools/constraint_solver/local_search.cc b/ortools/constraint_solver/local_search.cc
index 92d4cbca9df..2a0f237af5c 100644
--- a/ortools/constraint_solver/local_search.cc
+++ b/ortools/constraint_solver/local_search.cc
@@ -63,8 +63,8 @@ namespace operations_research {
// Utility methods to ensure the communication between local search and the
// search.
-// Returns true if a local optimum has been reached and cannot be improved.
-bool LocalOptimumReached(Search* search);
+// Returns true if the search must continue after reaching the local optimum.
+bool ContinueAtLocalOptimum(Search* search);
// Returns true if the search accepts the delta (actually checking this by
// calling AcceptDelta on the monitors of the search).
@@ -4130,7 +4130,9 @@ Decision* FindOneNeighbor::Next(Solver* const solver) {
if (solutions_since_last_check_ >= check_period_) {
solutions_since_last_check_ = 0;
}
- const bool accept = !check_solution || solver->SolveAndCommit(restore);
+ const bool accept = !check_solution ||
+ (solver->SolveAndCommit(restore) &&
+ solver->AcceptSolution(solver->TopLevelSearch()));
solver->GetLocalSearchMonitor()->EndAcceptNeighbor(ls_operator_,
accept);
if (accept) {
@@ -4392,7 +4394,7 @@ class NestedSolveDecision : public Decision {
private:
DecisionBuilder* const db_;
- bool restore_;
+ const bool restore_;
std::vector<SearchMonitor*> monitors_;
int state_;
};
@@ -4647,15 +4649,21 @@ Decision* LocalSearch::Next(Solver* const solver) {
const int state = decision->state();
switch (state) {
case NestedSolveDecision::DECISION_FAILED: {
- const bool local_optimum_reached =
- LocalOptimumReached(solver->ActiveSearch());
- if (local_optimum_reached) {
+ // NOTE: The DECISION_FAILED state can be reached when no first solution
+ // was found by the solver, so we should only consider the search to be at
+ // a local optimum, and call ContinueAtLocalOptimum(), once we've reached
+ // the last nested decision.
+ const bool continue_at_local_optimum =
+ nested_decision_index_ == nested_decisions_.size() - 1 &&
+ ContinueAtLocalOptimum(solver->ActiveSearch());
+ if (continue_at_local_optimum) {
// A local optimum has been reached. The search will continue only if we
// accept up-hill moves (due to metaheuristics). In this case we need to
// reset neighborhood optimal routes.
ls_operator_->Reset();
}
- if (!local_optimum_reached || solver->IsUncheckedSolutionLimitReached()) {
+ if (!continue_at_local_optimum ||
+ solver->IsUncheckedSolutionLimitReached()) {
nested_decision_index_ = -1; // Stop the search
}
solver->Fail();
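
The hunk above makes two behavioural changes: an accepted neighbor must now also pass `AcceptSolution` on the top-level search, and `LocalSearch::Next` only treats a failed nested decision as a local optimum once the last nested decision has been reached. Whether the search then continues past that local optimum is still driven by the user-visible search parameters (metaheuristic and limits). A minimal sketch of those knobs, using the legacy `pywrapcp` routing API that the deleted `routing_svg.py` above relies on (this PR is migrating that API, so treat the import path as legacy); the 5-node instance is made up purely for illustration:

```python
from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2

# Toy instance: 5 nodes, 1 vehicle, depot at node 0.
manager = pywrapcp.RoutingIndexManager(5, 1, 0)
routing = pywrapcp.RoutingModel(manager)

def distance_callback(from_index, to_index):
    """Returns a made-up distance between two nodes."""
    return abs(manager.IndexToNode(from_index) - manager.IndexToNode(to_index))

transit_index = routing.RegisterTransitCallback(distance_callback)
routing.SetArcCostEvaluatorOfAllVehicles(transit_index)

parameters = pywrapcp.DefaultRoutingSearchParameters()
parameters.first_solution_strategy = (
    routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC)
# Without a metaheuristic, local search stops at the first local optimum.
# GUIDED_LOCAL_SEARCH accepts up-hill moves, so a limit is needed to stop.
parameters.local_search_metaheuristic = (
    routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH)
parameters.time_limit.FromSeconds(1)

assignment = routing.SolveWithParameters(parameters)
print("Solution found:", assignment is not None)
```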
diff --git a/ortools/constraint_solver/python/CMakeLists.txt b/ortools/constraint_solver/python/CMakeLists.txt
index 702943a30e0..8fe4444aa78 100644
--- a/ortools/constraint_solver/python/CMakeLists.txt
+++ b/ortools/constraint_solver/python/CMakeLists.txt
@@ -11,16 +11,37 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-set_property(SOURCE routing.i PROPERTY CPLUSPLUS ON)
-set_property(SOURCE routing.i PROPERTY SWIG_MODULE_NAME pywrapcp)
-set_property(SOURCE routing.i PROPERTY COMPILE_DEFINITIONS
+# constraint_solver
+pybind11_add_module(constraint_solver_pybind11 MODULE constraint_solver.cc)
+set_target_properties(constraint_solver_pybind11 PROPERTIES
+ LIBRARY_OUTPUT_NAME "constraint_solver")
+
+# note: macOS is APPLE and also UNIX !
+if(APPLE)
+ set_target_properties(constraint_solver_pybind11 PROPERTIES
+ SUFFIX ".so"
+ INSTALL_RPATH "@loader_path;@loader_path/../../../${PYTHON_PROJECT}/.libs")
+elseif(UNIX)
+ set_target_properties(constraint_solver_pybind11 PROPERTIES
+ INSTALL_RPATH "$ORIGIN:$ORIGIN/../../../${PYTHON_PROJECT}/.libs")
+endif()
+
+target_link_libraries(constraint_solver_pybind11 PRIVATE
+ ${PROJECT_NAMESPACE}::ortools
+ pybind11_native_proto_caster)
+add_library(${PROJECT_NAMESPACE}::constraint_solver_pybind11 ALIAS constraint_solver_pybind11)
+
+# legacy pywrapcp
+set_property(SOURCE constraint_solver.i PROPERTY CPLUSPLUS ON)
+set_property(SOURCE constraint_solver.i PROPERTY SWIG_MODULE_NAME pywrapcp)
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_DEFINITIONS
${OR_TOOLS_COMPILE_DEFINITIONS} ABSL_MUST_USE_RESULT=)
-set_property(SOURCE routing.i PROPERTY COMPILE_OPTIONS -nofastunpack)
+set_property(SOURCE constraint_solver.i PROPERTY COMPILE_OPTIONS -nofastunpack)
swig_add_library(pywrapcp
TYPE MODULE
LANGUAGE python
OUTPUT_DIR ${PYTHON_PROJECT_DIR}/constraint_solver
- SOURCES routing.i)
+ SOURCES constraint_solver.i)
target_include_directories(pywrapcp PRIVATE ${Python3_INCLUDE_DIRS})
set_property(TARGET pywrapcp PROPERTY SWIG_USE_TARGET_INCLUDE_DIRECTORIES ON)
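
Note that the SWIG interface file is renamed (`routing.i` -> `constraint_solver.i`) but its `SWIG_MODULE_NAME` stays `pywrapcp`, so existing Python user code is unaffected, while the new `constraint_solver_pybind11` target is built alongside it. A minimal sketch of the unchanged legacy import (the Python install path of the new pybind11 module is not pinned down by this diff, so it is not shown here):

```python
# Legacy SWIG wrapper: same import and same CamelCase API as before the rename.
from ortools.constraint_solver import pywrapcp

solver = pywrapcp.Solver("legacy_api_unchanged")
x = solver.IntVar(0, 2, "x")
print("Constraints:", solver.Constraints())
```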
diff --git a/ortools/constraint_solver/python/constraint_solver.cc b/ortools/constraint_solver/python/constraint_solver.cc
new file mode 100644
index 00000000000..c771e67b18f
--- /dev/null
+++ b/ortools/constraint_solver/python/constraint_solver.cc
@@ -0,0 +1,334 @@
+// Copyright 2010-2025 Google LLC
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "ortools/constraint_solver/constraint_solver.h"
+
+#include <setjmp.h>  // For FailureProtect. See below.
+
+#include <cstdint>
+#include <functional>
+#include <string>
+#include <vector>
+
+#include "absl/strings/string_view.h"
+#include "ortools/constraint_solver/assignment.pb.h"
+#include "ortools/constraint_solver/python/constraint_solver_doc.h"
+#include "pybind11/cast.h"
+#include "pybind11/functional.h"
+#include "pybind11/gil.h"
+#include "pybind11/pybind11.h"
+#include "pybind11/stl.h"
+#include "pybind11_protobuf/native_proto_caster.h"
+
+using ::operations_research::Assignment;
+using ::operations_research::AssignmentProto;
+using ::operations_research::BaseObject;
+using ::operations_research::Constraint;
+using ::operations_research::ConstraintSolverParameters;
+using ::operations_research::DecisionBuilder;
+using ::operations_research::IntervalVar;
+using ::operations_research::IntExpr;
+using ::operations_research::IntVar;
+using ::operations_research::ModelVisitor;
+using ::operations_research::PropagationBaseObject;
+using ::operations_research::Solver;
+using ::pybind11::arg;
+
+// Used in the PROTECT_FROM_FAILURE macro. See below.
+namespace {
+
+struct FailureProtect {
+ jmp_buf exception_buffer;
+ void JumpBack() { longjmp(exception_buffer, 1); }
+};
+
+} // namespace
+
+#define PROTECT_FROM_FAILURE(this_, action) \
+ Solver* solver = this_->solver(); \
+ FailureProtect protect; \
+ solver->set_fail_intercept([&protect]() { protect.JumpBack(); }); \
+ if (setjmp(protect.exception_buffer) == 0) { \
+ this_->action; \
+ solver->clear_fail_intercept(); \
+ } else { \
+ solver->clear_fail_intercept(); \
+ throw pybind11::value_error("Solver fails outside of solve()"); \
+ }
+
+class BaseObjectPythonHelper {
+ public:
+ static std::string DebugString(BaseObject* this_) {
+ return this_->DebugString();
+ }
+};
+
+class PropagationBaseObjectPythonHelper : BaseObjectPythonHelper {
+ public:
+ static std::string DebugString(PropagationBaseObject* this_) {
+ return this_->DebugString();
+ }
+ static Solver* solver(PropagationBaseObject* this_) {
+ return this_->solver();
+ }
+
+ static std::string name(PropagationBaseObject* this_) {
+ return this_->name();
+ }
+
+ static void SetName(PropagationBaseObject* this_, absl::string_view name) {
+ this_->set_name(name);
+ }
+};
+
+class IntExprPythonHelper : PropagationBaseObjectPythonHelper {
+ public:
+ static int64_t Min(IntExpr* this_) { return this_->Min(); }
+ static int64_t Max(IntExpr* this_) { return this_->Max(); }
+ static void SetMin(IntExpr* this_, int64_t m) {
+ PROTECT_FROM_FAILURE(this_, SetMin(m));
+ }
+ static void SetMax(IntExpr* this_, int64_t m) {
+ PROTECT_FROM_FAILURE(this_, SetMax(m));
+ }
+ static void SetRange(IntExpr* this_, int64_t mi, int64_t ma) {
+ PROTECT_FROM_FAILURE(this_, SetRange(mi, ma));
+ }
+ static void SetValue(IntExpr* this_, int64_t v) {
+ PROTECT_FROM_FAILURE(this_, SetValue(v));
+ }
+ static bool Bound(IntExpr* this_) { return this_->Bound(); }
+};
+
+class IntVarPythonHelper : IntExprPythonHelper {
+ public:
+ static std::string name(IntVar* this_) { return this_->name(); }
+ static int64_t Value(IntVar* this_) { return this_->Value(); }
+ static void RemoveValue(IntVar* this_, int64_t v) {
+ PROTECT_FROM_FAILURE(this_, RemoveValue(v));
+ }
+ static int64_t Size(IntVar* this_) { return this_->Size(); }
+};
+
+PYBIND11_MODULE(constraint_solver, m) {
+ pybind11_protobuf::ImportNativeProtoCasters();
+
+ pybind11::class_(m, "Solver", DOC(operations_research, Solver))
+ .def(pybind11::init<const std::string&>())
+ .def(pybind11::init<const std::string&, const ConstraintSolverParameters&>())
+ .def("__str__", &Solver::DebugString)
+ .def("default_solver_parameters", &Solver::DefaultSolverParameters)
+ .def("parameters", &Solver::parameters)
+ .def("local_search_profile", &Solver::LocalSearchProfile)
+ .def("new_int_var",
+ pybind11::overload_cast<int64_t, int64_t, const std::string&>(
+ &Solver::MakeIntVar),
+ DOC(operations_research, Solver, MakeIntVar),
+ pybind11::return_value_policy::reference_internal)
+ .def("new_int_var",
+ pybind11::overload_cast<int64_t, int64_t>(&Solver::MakeIntVar),
+ DOC(operations_research, Solver, MakeIntVar),
+ pybind11::return_value_policy::reference_internal)
+ .def("new_int_var",
+ pybind11::overload_cast<const std::vector<int64_t>&,
+ const std::string&>(&Solver::MakeIntVar),
+ DOC(operations_research, Solver, MakeIntVar_2),
+ pybind11::return_value_policy::reference_internal)
+ .def("new_int_var",
+ pybind11::overload_cast<const std::vector<int64_t>&>(
+ &Solver::MakeIntVar),
+ DOC(operations_research, Solver, MakeIntVar_2),
+ pybind11::return_value_policy::reference_internal)
+ .def("add", &Solver::AddConstraint,
+ DOC(operations_research, Solver, AddConstraint), arg("c"))
+ .def("accept", &Solver::Accept, DOC(operations_research, Solver, Accept),
+ arg("visitor"))
+ .def("print_model_visitor", &Solver::MakePrintModelVisitor,
+ DOC(operations_research, Solver, MakePrintModelVisitor),
+ pybind11::return_value_policy::reference_internal);
+
+ pybind11::class_(m, "BaseObject",
+ DOC(operations_research, BaseObject))
+ .def("__str__", &BaseObjectPythonHelper::DebugString);
+
+ pybind11::class_<PropagationBaseObject, BaseObject>(
+ m, "PropagationBaseObject",
+ DOC(operations_research, PropagationBaseObject))
+ .def_property("name", &PropagationBaseObjectPythonHelper::name,
+ &PropagationBaseObjectPythonHelper::SetName);
+
+ // Note: no ctor.
+ pybind11::class_<IntExpr, PropagationBaseObject>(
+ m, "IntExpr", DOC(operations_research, IntExpr))
+ .def_property_readonly("min", &IntExprPythonHelper::Min,
+ DOC(operations_research, IntExpr, Min))
+ .def_property_readonly("max", &IntExprPythonHelper::Max,
+ DOC(operations_research, IntExpr, Max))
+ .def("set_min", &IntExprPythonHelper::SetMin,
+ DOC(operations_research, IntExpr, SetMin), arg("m"))
+ .def("set_max", &IntExprPythonHelper::SetMax,
+ DOC(operations_research, IntExpr, SetMax), arg("m"))
+ .def("set_range", &IntExprPythonHelper::SetRange,
+ DOC(operations_research, IntExpr, SetRange), arg("mi"), arg("ma"))
+ .def("set_value", &IntExprPythonHelper::SetValue,
+ DOC(operations_research, IntExpr, SetValue), arg("v"))
+ .def("bound", &IntExprPythonHelper::Bound,
+ DOC(operations_research, IntExpr, Bound))
+ .def(
+ "__add__",
+ [](IntExpr* e, int64_t arg) { return e->solver()->MakeSum(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__add__",
+ [](IntExpr* e, IntExpr* arg) { return e->solver()->MakeSum(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__radd__",
+ [](IntExpr* e, int64_t arg) { return e->solver()->MakeSum(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__radd__",
+ [](IntExpr* e, IntExpr* arg) { return e->solver()->MakeSum(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__mul__",
+ [](IntExpr* e, int64_t arg) { return e->solver()->MakeProd(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__mul__",
+ [](IntExpr* e, IntExpr* arg) {
+ return e->solver()->MakeProd(e, arg);
+ },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__rmul__",
+ [](IntExpr* e, int64_t arg) { return e->solver()->MakeProd(e, arg); },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__rmul__",
+ [](IntExpr* e, IntExpr* arg) {
+ return e->solver()->MakeProd(e, arg);
+ },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__eq__",
+ [](IntExpr* left, IntExpr* right) {
+ return left->solver()->MakeEquality(left, right);
+ },
+ pybind11::return_value_policy::reference_internal)
+ .def(
+ "__eq__",
+ [](IntExpr* left, int64_t right) {
+ return left->solver()->MakeEquality(left, right);
+ },
+ pybind11::return_value_policy::reference_internal);
+
+ // Note: no ctor.
+ pybind11::class_(m, "IntVar",
+ DOC(operations_research, IntVar))
+ .def("value", &IntVarPythonHelper::Value,
+ DOC(operations_research, IntVar, Value))
+ .def("remove_value", &IntVarPythonHelper::RemoveValue,
+ DOC(operations_research, IntVar, RemoveValue), arg("v"))
+ .def("size", &IntVarPythonHelper::Size,
+ DOC(operations_research, IntVar, Size));
+
+ // Note: no ctor.
+ pybind11::class_(m, "Constraint",
+ DOC(operations_research, Constraint))
+ .def("var", &Constraint::Var, DOC(operations_research, Constraint, Var));
+
+ // Note: no ctor.
+ pybind11::class_<DecisionBuilder, BaseObject>(
+ m, "DecisionBuilder", DOC(operations_research, DecisionBuilder))
+ .def_property("name", &DecisionBuilder::GetName,
+ &DecisionBuilder::set_name);
+
+ // Note: no ctor.
+ pybind11::class_<ModelVisitor, BaseObject>(
+ m, "ModelVisitor", DOC(operations_research, ModelVisitor));
+
+ pybind11::class_<Assignment, PropagationBaseObject>(
+ m, "Assignment", DOC(operations_research, Assignment))
+ .def(pybind11::init<Solver*>())
+ .def("clear", &Assignment::Clear)
+ .def("empty", &Assignment::Empty)
+ .def("size", &Assignment::Size)
+ .def("num_int_vars", &Assignment::NumIntVars)
+ .def("num_interval_vars", &Assignment::NumIntervalVars)
+ .def("num_sequence_vars", &Assignment::NumSequenceVars)
+ .def("store", &Assignment::Store)
+ .def("restore", &Assignment::Restore)
+ .def("load",
+ pybind11::overload_cast<const std::string&>(&Assignment::Load),
+ arg("filename"))
+ .def("load",
+ pybind11::overload_cast<const AssignmentProto&>(&Assignment::Load),
+ arg("assignment_proto"))
+ .def("add_objective", &Assignment::AddObjective, arg("v"))
+ .def("add_objectives", &Assignment::AddObjectives, arg("vars"))
+ .def("clear_objective", &Assignment::ClearObjective)
+ .def("num_objectives", &Assignment::NumObjectives)
+ .def("objective", &Assignment::Objective)
+ .def("objective_from_index", &Assignment::ObjectiveFromIndex,
+ arg("index"))
+ .def("has_objective", &Assignment::HasObjective)
+ .def("has_objective_from_index", &Assignment::HasObjectiveFromIndex,
+ arg("index"))
+ .def("objective_min", &Assignment::ObjectiveMin)
+ .def("objective_max", &Assignment::ObjectiveMax)
+ .def("objective_value", &Assignment::ObjectiveValue)
+ .def("objective_bound", &Assignment::ObjectiveBound)
+ .def("set_objective_min", &Assignment::SetObjectiveMin, arg("m"))
+ .def("set_objective_max", &Assignment::SetObjectiveMax, arg("m"))
+ .def("set_objective_value", &Assignment::SetObjectiveValue, arg("value"))
+ .def("set_objective_range", &Assignment::SetObjectiveRange, arg("l"),
+ arg("u"))
+ .def("objective_min_from_index", &Assignment::ObjectiveMinFromIndex,
+ arg("index"))
+ .def("objective_max_from_index", &Assignment::ObjectiveMaxFromIndex,
+ arg("index"))
+ .def("objective_value_from_index", &Assignment::ObjectiveValueFromIndex,
+ arg("index"))
+ .def("objective_bound_from_index", &Assignment::ObjectiveBoundFromIndex,
+ arg("index"))
+ .def("set_objective_min_from_index",
+ &Assignment::SetObjectiveMinFromIndex, arg("index"), arg("m"))
+ .def("set_objective_max_from_index",
+ &Assignment::SetObjectiveMaxFromIndex, arg("index"), arg("m"))
+ .def("set_objective_range_from_index",
+ &Assignment::SetObjectiveRangeFromIndex, arg("index"), arg("l"),
+ arg("u"))
+ .def("add", pybind11::overload_cast(&Assignment::Add),
+ arg("var"))
+ .def("add",
+ pybind11::overload_cast<const std::vector<IntVar*>&>(
+ &Assignment::Add),
+ arg("var"))
+ .def("min", &Assignment::Min, arg("var"))
+ .def("max", &Assignment::Max, arg("var"))
+ .def("value", &Assignment::Value, arg("var"))
+ .def("bound", &Assignment::Bound, arg("var"))
+ .def("set_min", &Assignment::SetMin, arg("var"), arg("m"))
+ .def("set_max", &Assignment::SetMax, arg("var"), arg("m"))
+ .def("set_range", &Assignment::SetRange, arg("var"), arg("l"), arg("u"))
+ .def("set_value", &Assignment::SetValue, arg("var"), arg("value"))
+ .def("add", pybind11::overload_cast(&Assignment::Add),
+ arg("var"))
+ .def("add",
+ pybind11::overload_cast<const std::vector<IntervalVar*>&>(
+ &Assignment::Add),
+ arg("var"));
+ // missing IntervalVar, SequenceVar, active/deactivate, contains, copy
+}
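
Taken together, the new bindings expose a snake_case, property-based surface (`new_int_var`, `add`, `min`/`max` as properties, `set_range`, `value`) rather than pywrapcp's CamelCase methods, with arithmetic and comparison operators mapped onto `MakeSum`/`MakeProd`/`MakeEquality`, and with `PROTECT_FROM_FAILURE` turning solver failures outside of a solve into Python `ValueError`s. A sketch of model building against these bindings, assuming the module is importable as `ortools.constraint_solver.constraint_solver` (an assumption based on the CMake `LIBRARY_OUTPUT_NAME` above, not something this diff states):

```python
# Sketch only: the method and property names come from the bindings above;
# the import path is an assumption.
from ortools.constraint_solver import constraint_solver as cs

solver = cs.Solver("pybind11_demo")
x = solver.new_int_var(0, 2, "x")
y = solver.new_int_var(0, 2, "y")

# Operators on IntExpr map to MakeSum / MakeEquality; add() wraps AddConstraint.
total = x + y
solver.add(total == 3)

print(total.min, total.max)  # read-only bound properties on IntExpr
total.set_range(1, 4)        # raises ValueError if the solver fails outside solve()
```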
diff --git a/ortools/constraint_solver/python/constraint_solver.i b/ortools/constraint_solver/python/constraint_solver.i
index 1e2ce4a2b40..498e4af1466 100644
--- a/ortools/constraint_solver/python/constraint_solver.i
+++ b/ortools/constraint_solver/python/constraint_solver.i
@@ -21,15 +21,15 @@
//
// USAGE EXAMPLES (most of which are also unit tests):
// - ./pywrapcp_test.py
-// - ortools/python/appointments.py
-// - ortools/python/golomb8.py
-// - ortools/python/hidato_table.py
-// - ortools/python/jobshop_ft06.py
-// - ortools/python/magic_sequence_distribute.py
-// - ortools/python/rabbit_pheasant.py
-// - ortools/python/simple_meeting.py
-// - ortools/python/sudoku.py
-// - ortools/python/zebra.py
+// - examples/python/appointments.py
+// - examples/python/golomb8.py
+// - examples/python/hidato_table.py
+// - examples/python/jobshop_ft06.py
+// - examples/python/magic_sequence_distribute.py
+// - examples/python/rabbit_pheasant.py
+// - examples/python/simple_meeting.py
+// - examples/python/sudoku.py
+// - examples/python/zebra.py
%include "ortools/base/base.i"
%include "ortools/util/python/proto.i"
diff --git a/ortools/constraint_solver/python/constraint_solver_doc.h b/ortools/constraint_solver/python/constraint_solver_doc.h
new file mode 100644
index 00000000000..c8dff6346f2
--- /dev/null
+++ b/ortools/constraint_solver/python/constraint_solver_doc.h
@@ -0,0 +1,6158 @@
+// Copyright 2010-2025 Google LLC
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+ This file contains docstrings for use in the Python bindings.
+ Do not edit! They were automatically extracted by pybind11_mkdoc.
+ */
+
+#define __EXPAND(x) x
+#define __COUNT(_1, _2, _3, _4, _5, _6, _7, COUNT, ...) COUNT
+#define __VA_SIZE(...) __EXPAND(__COUNT(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1))
+#define __CAT1(a, b) a##b
+#define __CAT2(a, b) __CAT1(a, b)
+#define __DOC1(n1) __doc_##n1
+#define __DOC2(n1, n2) __doc_##n1##_##n2
+#define __DOC3(n1, n2, n3) __doc_##n1##_##n2##_##n3
+#define __DOC4(n1, n2, n3, n4) __doc_##n1##_##n2##_##n3##_##n4
+#define __DOC5(n1, n2, n3, n4, n5) __doc_##n1##_##n2##_##n3##_##n4##_##n5
+#define __DOC6(n1, n2, n3, n4, n5, n6) \
+ __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6
+#define __DOC7(n1, n2, n3, n4, n5, n6, n7) \
+ __doc_##n1##_##n2##_##n3##_##n4##_##n5##_##n6##_##n7
+#define DOC(...) \
+ __EXPAND(__EXPAND(__CAT2(__DOC, __VA_SIZE(__VA_ARGS__)))(__VA_ARGS__))
+
+#if defined(__GNUG__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-variable"
+#endif
+
+static const char* __doc_ABSL_DECLARE_FLAG = R"doc()doc";
+
+static const char* __doc_File = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment =
+ R"doc(An Assignment is a variable -> domains mapping, used to report
+solutions to the user.)doc";
+
+static const char* __doc_operations_research_Assignment_2 =
+ R"doc(An Assignment is a variable -> domains mapping, used to report
+solutions to the user.)doc";
+
+static const char* __doc_operations_research_AssignmentContainer = R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Add =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_AddAtPosition =
+ R"doc(Advanced usage: Adds element at a given position; position has to have
+been allocated with AssignmentContainer::Resize() beforehand.)doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_AreAllElementsBound =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_AssignmentContainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Clear =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Contains =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Copy =
+ R"doc(Copies all the elements of 'container' to this container, clearing its
+previous content.)doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_CopyIntersection =
+ R"doc(Copies the elements of 'container' which are already in the calling
+container.)doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Element =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Element_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_ElementPtrOrNull =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Empty =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_EnsureMapIsUpToDate =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_FastAdd =
+ R"doc(Adds element without checking its presence in the container.)doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Find =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_MutableElement = R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_MutableElement_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentContainer_MutableElementOrNull =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Resize =
+ R"doc(Advanced usage: Resizes the container, potentially adding elements
+with null variables.)doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Restore =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Size =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_Store =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_elements =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_elements_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_elements_map =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentContainer_operator_eq =
+ R"doc(Returns true if this and 'container' both represent the same V* -> E
+map. Runs in linear time; requires that the == operator on the type E
+is well defined.)doc";
+
+static const char* __doc_operations_research_AssignmentContainer_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentElement = R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentElement_Activate =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentElement_Activated =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_AssignmentElement_AssignmentElement = R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentElement_Deactivate =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentElement_activated =
+ R"doc()doc";
+
+static const char* __doc_operations_research_AssignmentProto = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activate = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activate_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activate_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ActivateObjective =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_ActivateObjectiveFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activated = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activated_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Activated_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ActivatedObjective =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_ActivatedObjectiveFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add_4 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add_5 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Add_6 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_AddObjective =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_AddObjectives =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_AreAllElementsBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Assignment =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Assignment_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Assignment_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_BackwardSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Bound = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Clear = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ClearObjective =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Contains = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Contains_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Contains_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Copy =
+ R"doc(Copies 'assignment' to the current assignment, clearing its previous
+content.)doc";
+
+static const char* __doc_operations_research_Assignment_CopyIntersection =
+ R"doc(Copies the intersection of the two assignments to the current
+assignment.)doc";
+
+static const char* __doc_operations_research_Assignment_Deactivate =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Deactivate_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Deactivate_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_DeactivateObjective =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_DeactivateObjectiveFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_DurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_DurationMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_DurationValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Empty = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_EndMax = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_EndMin = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_EndValue = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_FastAdd =
+ R"doc(Adds without checking if variable has been previously added.)doc";
+
+static const char* __doc_operations_research_Assignment_FastAdd_2 =
+ R"doc(Adds without checking if variable has been previously added.)doc";
+
+static const char* __doc_operations_research_Assignment_FastAdd_3 =
+ R"doc(Adds without checking if the variable had been previously added.)doc";
+
+static const char* __doc_operations_research_Assignment_ForwardSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_HasObjective =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_HasObjectiveFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_IntVarContainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_IntervalVarContainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Load =
+ R"doc(Loads an assignment from a file; does not add variables to the
+assignment (only the variables contained in the assignment are
+modified).)doc";
+
+static const char* __doc_operations_research_Assignment_Load_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Load_3 =
+ R"doc(#if !defined(SWIG))doc";
+
+static const char* __doc_operations_research_Assignment_Max = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Min = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_MutableIntVarContainer =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_MutableIntervalVarContainer =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_MutableSequenceVarContainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_NumIntVars =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_NumIntervalVars =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_NumObjectives =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_NumSequenceVars =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Objective = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveBound =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_ObjectiveBoundFromIndex = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveMaxFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveMinFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_ObjectiveValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_ObjectiveValueFromIndex = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_PerformedMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_PerformedMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_PerformedValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Restore = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Save =
+ R"doc(Saves the assignment to a file.)doc";
+
+static const char* __doc_operations_research_Assignment_Save_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Save_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SequenceVarContainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetBackwardSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetDurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetDurationMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetDurationRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetDurationValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetEndMax = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetEndMin = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetEndRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetEndValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetForwardSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetMax = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetMin = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetObjectiveMax =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_SetObjectiveMaxFromIndex = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetObjectiveMin =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_SetObjectiveMinFromIndex = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetObjectiveRange =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_SetObjectiveRangeFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetObjectiveValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Assignment_SetObjectiveValueFromIndex =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetPerformedMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetPerformedMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetPerformedRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetPerformedValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetRange = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetStartMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetStartMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetStartRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetStartValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetUnperformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_SetValue = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Size = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_StartMax = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_StartMin = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_StartValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Store = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Unperformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_Value = R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_int_var_container =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_interval_var_container =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_objective_elements =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_operator_eq =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Assignment_sequence_var_container =
+ R"doc()doc";
+
+static const char* __doc_operations_research_BaseObject =
+ R"doc(A BaseObject is the root of all reversibly allocated objects. A
+DebugString method and the associated << operator are implemented as a
+convenience.)doc";
+
+static const char* __doc_operations_research_BaseObject_2 =
+ R"doc(A BaseObject is the root of all reversibly allocated objects. A
+DebugString method and the associated << operator are implemented as a
+convenience.)doc";
+
+static const char* __doc_operations_research_BaseObject_BaseObject =
+ R"doc()doc";
+
+static const char* __doc_operations_research_BaseObject_BaseObject_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_BaseObject_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_BaseObject_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_CastConstraint =
+ R"doc(Cast constraints are special channeling constraints designed to keep a
+variable in sync with an expression. They are created internally when
+Var() is called on a subclass of IntExpr.)doc";
+
+static const char* __doc_operations_research_CastConstraint_2 =
+ R"doc(Cast constraints are special channeling constraints designed to keep a
+variable in sync with an expression. They are created internally when
+Var() is called on a subclass of IntExpr.)doc";
+
+static const char* __doc_operations_research_CastConstraint_CastConstraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_CastConstraint_target_var =
+ R"doc()doc";
+
+static const char* __doc_operations_research_CastConstraint_target_var_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ClockTimer = R"doc()doc";
+
+static const char* __doc_operations_research_Constraint =
+ R"doc(A constraint is the main modeling object. It provides two methods: -
+Post() is responsible for creating the demons and attaching them to
+immediate demons(). - InitialPropagate() is called once just after
+Post and performs the initial propagation. The subsequent propagations
+will be performed by the demons posted during the Post() method.)doc";
+
+static const char* __doc_operations_research_Constraint_2 =
+ R"doc(A constraint is the main modeling object. It provides two methods: -
+Post() is responsible for creating the demons and attaching them to
+immediate demons(). - InitialPropagate() is called once just after
+Post and performs the initial propagation. The subsequent propagations
+will be performed by the demons posted during the Post() method.)doc";
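+
+// Editorial sketch, not part of the generated output: a minimal constraint
+// following the Post()/InitialPropagate() protocol described above might
+// look like this (the class name and the bound of 6 are hypothetical):
+//
+//   class AtLeastSix : public operations_research::Constraint {
+//    public:
+//     AtLeastSix(operations_research::Solver* s,
+//                operations_research::IntVar* v)
+//         : Constraint(s), var_(v) {}
+//     void Post() override {
+//       // Re-run propagation whenever the bounds of var_ change.
+//       var_->WhenRange(solver()->MakeConstraintInitialPropagateCallback(this));
+//     }
+//     void InitialPropagate() override { var_->SetMin(6); }
+//
+//    private:
+//     operations_research::IntVar* const var_;
+//   };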
+
+static const char* __doc_operations_research_Constraint_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_Constraint_Constraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Constraint_Constraint_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Constraint_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Constraint_InitialPropagate =
+ R"doc(This method performs the initial propagation of the constraint. It is
+called just after the post.)doc";
+
+static const char* __doc_operations_research_Constraint_IsCastConstraint =
+ R"doc(Is the constraint created by a cast from expression to integer
+variable?)doc";
+
+static const char* __doc_operations_research_Constraint_Post =
+ R"doc(This method is called when the constraint is processed by the solver.
+Its main usage is to attach demons to variables.)doc";
+
+static const char* __doc_operations_research_Constraint_PostAndPropagate =
+ R"doc(Calls Post and then Propagate to initialize the constraints. This is
+usually done in the root node.)doc";
+
+static const char* __doc_operations_research_Constraint_Var =
+ R"doc(Creates a Boolean variable representing the status of the constraint
+(false = constraint is violated, true = constraint is satisfied). It
+returns nullptr if the constraint does not support this API.)doc";
+
+static const char* __doc_operations_research_Constraint_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_CpRandomSeed = R"doc()doc";
+
+static const char* __doc_operations_research_Decision =
+ R"doc(A Decision represents a choice point in the search tree. The two main
+methods are Apply() to go left, or Refute() to go right.)doc";
+
+static const char* __doc_operations_research_Decision_2 =
+ R"doc(A Decision represents a choice point in the search tree. The two main
+methods are Apply() to go left, or Refute() to go right.)doc";
+
+static const char* __doc_operations_research_DecisionBuilder =
+ R"doc(A DecisionBuilder is responsible for creating the search tree. The
+important method is Next(), which returns the next decision to
+execute.)doc";
+
+static const char* __doc_operations_research_DecisionBuilder_2 =
+ R"doc(A DecisionBuilder is responsible for creating the search tree. The
+important method is Next(), which returns the next decision to
+execute.)doc";
+
+static const char* __doc_operations_research_DecisionBuilder_Accept =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_AppendMonitors =
+ R"doc(This method will be called at the start of the search. It asks the
+decision builder if it wants to append search monitors to the list of
+active monitors for this search. Please note there are no checks at
+this point for duplication.)doc";
+
+static const char* __doc_operations_research_DecisionBuilder_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_DecisionBuilder =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_DecisionBuilder_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_GetName =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_Next =
+ R"doc(This is the main method of the decision builder class. It must return
+a decision (an instance of the class Decision). If it returns nullptr,
+this means that the decision builder has finished its work.)doc";
+
+static const char* __doc_operations_research_DecisionBuilder_name = R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionBuilder_set_name =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionVisitor =
+ R"doc(A DecisionVisitor is used to inspect a decision. It contains virtual
+methods for all types of 'declared' decisions.)doc";
+
+static const char* __doc_operations_research_DecisionVisitor_2 =
+ R"doc(A DecisionVisitor is used to inspect a decision. It contains virtual
+methods for all types of 'declared' decisions.)doc";
+
+static const char* __doc_operations_research_DecisionVisitor_DecisionVisitor =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionVisitor_DecisionVisitor_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitRankFirstInterval =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitRankLastInterval =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitScheduleOrExpedite =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitScheduleOrPostpone =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitSetVariableValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitSplitVariableDomain =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DecisionVisitor_VisitUnknownDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DecisionVisitor_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Decision_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_Decision_Apply =
+ R"doc(Apply will be called first when the decision is executed.)doc";
+
+static const char* __doc_operations_research_Decision_DebugString = R"doc()doc";
+
+static const char* __doc_operations_research_Decision_Decision = R"doc()doc";
+
+static const char* __doc_operations_research_Decision_Decision_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Decision_Refute =
+ R"doc(Refute will be called after a backtrack.)doc";
+
+static const char* __doc_operations_research_Decision_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DefaultPhaseParameters =
+ R"doc(This struct holds all parameters for the default search.
+DefaultPhaseParameters is only used by Solver::MakeDefaultPhase
+methods. Note this is for advanced users only.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_DefaultPhaseParameters =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_DisplayLevel = R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_DisplayLevel_NONE =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_DisplayLevel_NORMAL =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_DisplayLevel_VERBOSE =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_ValueSelection =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_ValueSelection_SELECT_MAX_IMPACT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_ValueSelection_SELECT_MIN_IMPACT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_VariableSelection =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_VariableSelection_CHOOSE_MAX_AVERAGE_IMPACT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_VariableSelection_CHOOSE_MAX_SUM_IMPACT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_VariableSelection_CHOOSE_MAX_VALUE_IMPACT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_decision_builder =
+ R"doc(When defined, this overrides the default impact based decision
+builder.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_display_level =
+ R"doc(This represents the amount of information displayed by the default
+search. NONE means no display, VERBOSE means extra information.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_heuristic_num_failures_limit =
+ R"doc(The failure limit for each heuristic that we run.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_heuristic_period =
+ R"doc(The distance in nodes between each run of the heuristics. A negative
+or null value will mean that we will not run heuristics at all.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_initialization_splits =
+ R"doc(Maximum number of intervals that the initialization of impacts will
+scan per variable.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_persistent_impact =
+ R"doc(Whether to keep the impact from the first search for other searches,
+or to recompute the impact for each new search.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_random_seed =
+ R"doc(Seed used to initialize the random part in some heuristics.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_run_all_heuristics =
+ R"doc(The default phase will run heuristics periodically. This parameter
+indicates if we should run all heuristics, or a randomly selected one.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_use_last_conflict =
+ R"doc(Should we use last conflict method. The default is false.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_value_selection_schema =
+ R"doc(This parameter describes which value to select for a given var.)doc";
+
+static const char*
+ __doc_operations_research_DefaultPhaseParameters_var_selection_schema =
+ R"doc(This parameter describes how the next variable to instantiate will be
+chosen.)doc";
+
+static const char* __doc_operations_research_Demon =
+ R"doc(A Demon is the base element of a propagation queue. It is the main
+object responsible for implementing the actual propagation of the
+constraint and pruning the inconsistent values in the domains of the
+variables. The main concept is that demons are listeners that are
+attached to the variables and listen to their modifications. There are
+two methods: - Run() is the actual method called when the demon is
+processed. - priority() returns its priority. Standard priorities are
+slow, normal or fast. "immediate" is reserved for variables and is
+treated separately.)doc";
+
+static const char* __doc_operations_research_Demon_2 =
+ R"doc(A Demon is the base element of a propagation queue. It is the main
+object responsible for implementing the actual propagation of the
+constraint and pruning the inconsistent values in the domains of the
+variables. The main concept is that demons are listeners that are
+attached to the variables and listen to their modifications. There are
+two methods: - Run() is the actual method called when the demon is
+processed. - priority() returns its priority. Standard priorities are
+slow, normal or fast. "immediate" is reserved for variables and is
+treated separately.)doc";
+
+static const char* __doc_operations_research_DemonProfiler = R"doc()doc";
+
+static const char* __doc_operations_research_Demon_DebugString = R"doc()doc";
+
+static const char* __doc_operations_research_Demon_Demon =
+ R"doc(This indicates the priority of a demon. Immediate demons are treated
+separately and correspond to variables.)doc";
+
+static const char* __doc_operations_research_Demon_Demon_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Demon_Run =
+ R"doc(This is the main callback of the demon.)doc";
+
+static const char* __doc_operations_research_Demon_desinhibit =
+ R"doc(This method un-inhibits the demon that was previously inhibited.)doc";
+
+static const char* __doc_operations_research_Demon_inhibit =
+ R"doc(This method inhibits the demon in the search tree below the current
+position.)doc";
+
+static const char* __doc_operations_research_Demon_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Demon_priority =
+ R"doc(This method returns the priority of the demon. Usually a demon is
+fast, slow or normal. Immediate demons are reserved for internal use
+to maintain variables.)doc";
+
+static const char* __doc_operations_research_Demon_set_stamp = R"doc()doc";
+
+static const char* __doc_operations_research_Demon_stamp = R"doc()doc";
+
+static const char* __doc_operations_research_Demon_stamp_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Dimension = R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_DisjunctiveConstraint =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_DisjunctiveConstraint_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_MakeSequenceVar =
+ R"doc(Creates a sequence variable from the constraint.)doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_SetTransitionTime =
+ R"doc(Add a transition time between intervals. It forces the distance
+between the end of interval a and start of interval b that follows it
+to be at least transition_time(a, b). This function must always return
+a positive or null value.)doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_TransitionTime =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_actives =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_intervals =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_nexts =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_time_cumuls =
+ R"doc()doc";
+
+static const char* __doc_operations_research_DisjunctiveConstraint_time_slacks =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_DisjunctiveConstraint_transition_time =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_AtSolution =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_CheckWithOffset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_Copy =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_ImprovementSearchLimit =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_ImprovementSearchLimit_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_Init =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_Install =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_MakeClone =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_best_objectives =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_gradient_stage =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_improvement_rate_coefficient =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_improvement_rate_solutions_distance =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_improvements = R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_maximize =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_objective_offsets =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_objective_scaling_factors =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_objective_updated =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ImprovementSearchLimit_objective_vars =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ImprovementSearchLimit_thresholds =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues =
+ R"doc(Utility class to encapsulate an IntVarIterator and use it in a range-
+based loop. See the code snippet above IntVarIterator.
+
+It contains DEBUG_MODE-enabled code that DCHECKs that the same
+iterator instance isn't being iterated on in multiple places
+simultaneously.)doc";
+
+static const char* __doc_operations_research_InitAndGetValues_InitAndGetValues =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator_Begin =
+ R"doc(These are the only way to construct an Iterator.)doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator_End =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_InitAndGetValues_Iterator_Iterator = R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator_is_end =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_Iterator_it =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_InitAndGetValues_Iterator_operator_inc =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_InitAndGetValues_Iterator_operator_mul =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_InitAndGetValues_Iterator_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_begin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_begin_was_called =
+ R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_end = R"doc()doc";
+
+static const char* __doc_operations_research_InitAndGetValues_it = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr =
+ R"doc(The class IntExpr is the base of all integer expressions in constraint
+programming. It contains the basic protocol for an expression: -
+setting and modifying its bound - querying if it is bound - listening
+to events modifying its bounds - casting it into a variable (instance
+of IntVar))doc";
+
+static const char* __doc_operations_research_IntExpr_2 =
+ R"doc(The class IntExpr is the base of all integer expressions in constraint
+programming. It contains the basic protocol for an expression: -
+setting and modifying its bound - querying if it is bound - listening
+to events modifying its bounds - casting it into a variable (instance
+of IntVar))doc";
+
+static const char* __doc_operations_research_IntExpr_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_IntExpr_Bound =
+ R"doc(Returns true if the min and the max of the expression are equal.)doc";
+
+static const char* __doc_operations_research_IntExpr_IntExpr = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_IntExpr_2 = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_IsVar =
+ R"doc(Returns true if the expression is indeed a variable.)doc";
+
+static const char* __doc_operations_research_IntExpr_Max = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_Min = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_Range =
+ R"doc(By default calls Min() and Max(), but can be redefined when Min and
+Max code can be factorized.)doc";
+
+static const char* __doc_operations_research_IntExpr_SetMax = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_SetMin = R"doc()doc";
+
+static const char* __doc_operations_research_IntExpr_SetRange =
+ R"doc(This method sets both the min and the max of the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_SetValue =
+ R"doc(This method sets the value of the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_Var =
+ R"doc(Creates a variable from the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_VarWithName =
+ R"doc(Creates a variable from the expression and set the name of the
+resulting var. If the expression is already a variable, then it will
+set the name of the expression, possibly overwriting it. This is just
+a shortcut to Var() followed by set_name().)doc";
+
+static const char* __doc_operations_research_IntExpr_WhenRange =
+ R"doc(Attach a demon that will watch the min or the max of the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_WhenRange_2 =
+ R"doc(Attach a demon that will watch the min or the max of the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_WhenRange_3 =
+ R"doc(Attach a demon that will watch the min or the max of the expression.)doc";
+
+static const char* __doc_operations_research_IntExpr_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVar =
+ R"doc(The class IntVar is a subset of IntExpr. In addition to the IntExpr
+protocol, it offers persistence, removing values from the domains, and
+a finer model for events.)doc";
+
+static const char* __doc_operations_research_IntVar_2 =
+ R"doc(The class IntVar is a subset of IntExpr. In addition to the IntExpr
+protocol, it offers persistence, removing values from the domains, and
+a finer model for events.)doc";
+
+static const char* __doc_operations_research_IntVarAssignment = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Bound = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Clone = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Copy = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_IntVarElement =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_IntVarElement_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_LoadFromProto =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Max = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Min = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Reset = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Restore =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_SetMax = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_SetMin = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_SetRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_SetValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Store = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Value = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_Var = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_WriteToProto =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_max = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_min = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_operator_eq =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVarElement_var = R"doc()doc";
+
+static const char* __doc_operations_research_IntVarIterator =
+ R"doc(IntVar* current_var; std::unique_ptr
+it(current_var->MakeHoleIterator(false)); for (const int64_t hole :
+InitAndGetValues(it)) { /// use the hole })doc";
+
+static const char* __doc_operations_research_IntVarIterator_DebugString =
+ R"doc(Pretty Print.)doc";
+
+static const char* __doc_operations_research_IntVarIterator_Init =
+ R"doc(This method must be called before each loop.)doc";
+
+static const char* __doc_operations_research_IntVarIterator_Next =
+ R"doc(This method moves the iterator to the next value.)doc";
+
+static const char* __doc_operations_research_IntVarIterator_Ok =
+ R"doc(This method indicates if we can call Value() or not.)doc";
+
+static const char* __doc_operations_research_IntVarIterator_Value =
+ R"doc(This method returns the current value of the iterator.)doc";
+
+static const char* __doc_operations_research_IntVarLocalSearchFilter =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_IntVar_Contains =
+ R"doc(This method returns whether the value 'v' is in the domain of the
+variable.)doc";
+
+static const char* __doc_operations_research_IntVar_IntVar = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IntVar_2 = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IntVar_3 = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IsDifferent = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IsEqual =
+ R"doc(IsEqual)doc";
+
+static const char* __doc_operations_research_IntVar_IsGreaterOrEqual =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IsLessOrEqual = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_IsVar = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_MakeDomainIterator =
+ R"doc(Creates a domain iterator. When 'reversible' is false, the returned
+object is created on the normal C++ heap and the solver does NOT take
+ownership of the object.)doc";
+
+static const char* __doc_operations_research_IntVar_MakeHoleIterator =
+ R"doc(Creates a hole iterator. When 'reversible' is false, the returned
+object is created on the normal C++ heap and the solver does NOT take
+ownership of the object.)doc";
+
+static const char* __doc_operations_research_IntVar_OldMax =
+ R"doc(Returns the previous max.)doc";
+
+static const char* __doc_operations_research_IntVar_OldMin =
+ R"doc(Returns the previous min.)doc";
+
+static const char* __doc_operations_research_IntVar_RemoveInterval =
+ R"doc(This method removes the interval 'l' .. 'u' from the domain of the
+variable. It assumes that 'l' <= 'u'.)doc";
+
+static const char* __doc_operations_research_IntVar_RemoveValue =
+ R"doc(This method removes the value 'v' from the domain of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_RemoveValues =
+ R"doc(This method remove the values from the domain of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_SetValues =
+ R"doc(This method intersects the current domain with the values in the
+array.)doc";
+
+static const char* __doc_operations_research_IntVar_Size =
+ R"doc(This method returns the number of values in the domain of the
+variable.)doc";
+
+static const char* __doc_operations_research_IntVar_Value =
+ R"doc(This method returns the value of the variable. This method checks
+before that the variable is bound.)doc";
+
+static const char* __doc_operations_research_IntVar_Var = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_VarType = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_WhenBound =
+ R"doc(This method attaches a demon that will be awakened when the variable
+is bound.)doc";
+
+static const char* __doc_operations_research_IntVar_WhenBound_2 =
+ R"doc(This method attaches a closure that will be awakened when the variable
+is bound.)doc";
+
+static const char* __doc_operations_research_IntVar_WhenBound_3 =
+ R"doc(This method attaches an action that will be awakened when the variable
+is bound.)doc";
+
+static const char* __doc_operations_research_IntVar_WhenDomain =
+ R"doc(This method attaches a demon that will watch any domain modification
+of the domain of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_WhenDomain_2 =
+ R"doc(This method attaches a closure that will watch any domain modification
+of the domain of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_WhenDomain_3 =
+ R"doc(This method attaches an action that will watch any domain modification
+of the domain of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_index =
+ R"doc(Returns the index of the variable.)doc";
+
+static const char* __doc_operations_research_IntVar_index_2 = R"doc()doc";
+
+static const char* __doc_operations_research_IntVar_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar =
+ R"doc(Interval variables are often used in scheduling. The main
+characteristics of an IntervalVar are the start position, duration,
+and end date. All these characteristics can be queried and set, and
+demons can be posted on their modifications.
+
+An important aspect is optionality: an IntervalVar can be performed or
+not. If unperformed, then it simply does not exist, and its
+characteristics cannot be accessed any more. An interval var is
+automatically marked as unperformed when it is not consistent anymore
+(start greater than end, duration < 0...))doc";
+
+static const char* __doc_operations_research_IntervalVar_2 =
+ R"doc(Interval variables are often used in scheduling. The main
+characteristics of an IntervalVar are the start position, duration,
+and end date. All these characteristics can be queried and set, and
+demons can be posted on their modifications.
+
+An important aspect is optionality: an IntervalVar can be performed or
+not. If unperformed, then it simply does not exist, and its
+characteristics cannot be accessed any more. An interval var is
+automatically marked as unperformed when it is not consistent anymore
+(start greater than end, duration < 0...))doc";
+
+static const char* __doc_operations_research_IntervalVarAssignment =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement = R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Bound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Clone =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Copy =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_DurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_DurationMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_DurationValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_EndMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_EndMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_EndValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_IntervalVarElement =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_IntervalVarElement_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_LoadFromProto =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_PerformedMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_PerformedMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_PerformedValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Reset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Restore =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetDurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetDurationMin =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetDurationRange = R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetDurationValue = R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetEndMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetEndMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetEndRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetEndValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetPerformedMax = R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetPerformedMin = R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetPerformedRange =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_IntervalVarElement_SetPerformedValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetStartMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetStartMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetStartRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_SetStartValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_StartMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_StartMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_StartValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Store =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_Var =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_WriteToProto =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_duration_max =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_duration_min =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_end_max =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_end_min =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_operator_eq =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_performed_max =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_performed_min =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_start_max =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_start_min =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVarElement_var =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_IntervalVar_CannotBePerformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_DurationExpr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_DurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_DurationMin =
+ R"doc(These methods query, set, and watch the duration of the interval var.)doc";
+
+static const char* __doc_operations_research_IntervalVar_EndExpr = R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_EndMax = R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_EndMin =
+ R"doc(These methods query, set, and watch the end position of the interval
+var.)doc";
+
+static const char* __doc_operations_research_IntervalVar_IntervalVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_IntervalVar_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_IsPerformedBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_MayBePerformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_MustBePerformed =
+ R"doc(These methods query, set, and watch the performed status of the
+interval var.)doc";
+
+static const char* __doc_operations_research_IntervalVar_OldDurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_OldDurationMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_OldEndMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_OldEndMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_OldStartMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_OldStartMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_PerformedExpr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SafeDurationExpr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SafeEndExpr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SafeStartExpr =
+ R"doc(These methods create expressions encapsulating the start, end and
+duration of the interval var. If the interval var is unperformed, they
+will return the unperformed_value.)doc";
+
+static const char* __doc_operations_research_IntervalVar_SetDurationMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetDurationMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetDurationRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetEndMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetEndMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetEndRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetPerformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetStartMax =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetStartMin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_SetStartRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_StartExpr =
+ R"doc(These methods create expressions encapsulating the start, end and
+duration of the interval var. Please note that these must not be used
+if the interval var is unperformed.)doc";
+
+static const char* __doc_operations_research_IntervalVar_StartMax = R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_StartMin =
+ R"doc(These methods query, set, and watch the start position of the interval
+var.)doc";
+
+static const char* __doc_operations_research_IntervalVar_WasPerformedBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenAnything =
+ R"doc(Attaches a demon awakened when anything about this interval changes.)doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenAnything_2 =
+ R"doc(Attaches a closure awakened when anything about this interval changes.)doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenAnything_3 =
+ R"doc(Attaches an action awakened when anything about this interval changes.)doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationBound_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationBound_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationRange_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenDurationRange_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndBound_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndBound_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndRange_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenEndRange_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenPerformedBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenPerformedBound_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenPerformedBound_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartBound_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartBound_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartRange =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartRange_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_WhenStartRange_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_IntervalVar_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_LightIntFunctionElementCt =
+ R"doc()doc";
+
+static const char* __doc_operations_research_LightIntIntFunctionElementCt =
+ R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearch = R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchFilter = R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchFilterManager =
+ R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchMonitor = R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchOperator = R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchPhaseParameters =
+ R"doc()doc";
+
+static const char* __doc_operations_research_LocalSearchProfiler = R"doc()doc";
+
+static const char* __doc_operations_research_ModelCache = R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor =
+ R"doc(Model visitor.)doc";
+
+static const char* __doc_operations_research_ModelVisitor_2 =
+ R"doc(Model visitor.)doc";
+
+static const char* __doc_operations_research_ModelVisitor_BeginVisitConstraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_BeginVisitExtension =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_BeginVisitIntegerExpression =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_BeginVisitModel =
+ R"doc(Begin/End visit element.)doc";
+
+static const char* __doc_operations_research_ModelVisitor_EndVisitConstraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_EndVisitExtension =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_EndVisitIntegerExpression =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_EndVisitModel =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitInt64ToBoolExtension =
+ R"doc(Using SWIG on callbacks is troublesome, so we hide these methods
+during the wrapping.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitInt64ToInt64AsArray =
+ R"doc(Expands function as array when index min is 0.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitInt64ToInt64Extension =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_VisitIntegerArgument =
+ R"doc(Visit integer arguments.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerArrayArgument =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerExpressionArgument =
+ R"doc(Visit integer expression argument.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerMatrixArgument =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ModelVisitor_VisitIntegerVariable =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerVariable_2 = R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerVariableArrayArgument =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntegerVariableEvaluatorArgument =
+ R"doc(Helpers.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntervalArgument =
+ R"doc(Visit interval argument.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntervalArrayArgument =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitIntervalVariable = R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitSequenceArgument =
+ R"doc(Visit sequence argument.)doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitSequenceArrayArgument =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ModelVisitor_VisitSequenceVariable = R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRev =
+ R"doc(Subclass of Rev which adds numerical operations.)doc";
+
+static const char* __doc_operations_research_NumericalRevArray =
+ R"doc(Subclass of RevArray which adds numerical operations.)doc";
+
+static const char* __doc_operations_research_NumericalRevArray_Add =
+ R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRevArray_Decr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRevArray_Incr =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_NumericalRevArray_NumericalRevArray = R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRev_Add = R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRev_Decr = R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRev_Incr = R"doc()doc";
+
+static const char* __doc_operations_research_NumericalRev_NumericalRev =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor = R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_2 = R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_Accept =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_AcceptDelta =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_AtSolution =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_BestInternalValue = R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_BestValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_CurrentInternalValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_CurrentInternalValuesAreConstraining =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_EnterSearch =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_MakeMinimizationVarsLessOrEqualWithSteps =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_MakeMinimizationVarsLessOrEqualWithStepsStatus =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_Maximize =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_MinimizationVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_ObjectiveMonitor =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_ObjectiveMonitor_2 = R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_ObjectiveVar =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_SetCurrentInternalValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_Size =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_Step =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_best_values =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_current_values =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_found_initial_solution =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_minimization_vars = R"doc()doc";
+
+static const char*
+ __doc_operations_research_ObjectiveMonitor_minimization_vars_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_objective_vars =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_objective_vars_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_steps =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ObjectiveMonitor_upper_bounds =
+ R"doc()doc";
+
+static const char* __doc_operations_research_One =
+ R"doc(This method returns 1.)doc";
+
+static const char* __doc_operations_research_OptimizeVar =
+ R"doc(This class encapsulates an objective. It requires the direction
+(minimize or maximize), the variable to optimize, and the improvement
+step.)doc";
+
+static const char* __doc_operations_research_OptimizeVar_2 =
+ R"doc(This class encapsulates an objective. It requires the direction
+(minimize or maximize), the variable to optimize, and the improvement
+step.)doc";
+
+static const char* __doc_operations_research_OptimizeVar_AcceptSolution =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_ApplyBound =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_AtSolution =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_BeginNextDecision =
+ R"doc(Internal methods.)doc";
+
+static const char* __doc_operations_research_OptimizeVar_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_Name = R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_OptimizeVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_OptimizeVar_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_RefuteDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_OptimizeVar_best =
+ R"doc(Returns the best value found during search.)doc";
+
+static const char* __doc_operations_research_OptimizeVar_var =
+ R"doc(Returns the variable that is optimized.)doc";
+
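+// Usage sketch for OptimizeVar (illustrative only; 'solver', 'cost' and 'db'
+// are an assumed existing Solver, IntVar* and DecisionBuilder*):
+//
+//   // Minimize 'cost' with an improvement step of 1.
+//   operations_research::OptimizeVar* const objective =
+//       solver.MakeMinimize(cost, 1);
+//   solver.Solve(db, objective);
+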
+static const char* __doc_operations_research_Pack = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_Accept = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Pack_AddCountAssignedItemsDimension =
+ R"doc(This dimension links 'count_var' to the actual number of items
+assigned to a bin in the pack.)doc";
+
+static const char* __doc_operations_research_Pack_AddCountUsedBinDimension =
+ R"doc(This dimension links 'count_var' to the actual number of bins used in
+the pack.)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddSumVariableWeightsLessOrEqualConstantDimension =
+ R"doc(This dimension imposes: forall b in bins, sum (i in items: usage[i] *
+is_assigned(i, b)) <= capacity[b] where is_assigned(i, b) is true if
+and only if item i is assigned to the bin b.
+
+This can be used to model shapes of items by linking variables of the
+same item on parallel dimensions with an allowed assignment
+constraint.)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumEqualVarDimension =
+ R"doc(This dimension imposes that for all bins b, the weighted sum
+(weights[i]) of all objects i assigned to 'b' is equal to loads[b].)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumEqualVarDimension_2 =
+ R"doc(This dimension imposes that for all bins b, the weighted sum
+(weights->Run(i, b)) of all objects i assigned to 'b' is equal to
+loads[b].)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumLessOrEqualConstantDimension =
+ R"doc(This dimension imposes that for all bins b, the weighted sum
+(weights[i]) of all objects i assigned to 'b' is less or equal
+'bounds[b]'.)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumLessOrEqualConstantDimension_2 =
+ R"doc(This dimension imposes that for all bins b, the weighted sum
+(weights->Run(i)) of all objects i assigned to 'b' is less or equal to
+'bounds[b]'. Ownership of the callback is transferred to the pack
+constraint.)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumLessOrEqualConstantDimension_3 =
+ R"doc(This dimension imposes that for all bins b, the weighted sum
+(weights->Run(i, b) of all objects i assigned to 'b' is less or equal
+to 'bounds[b]'. Ownership of the callback is transferred to the pack
+constraint.)doc";
+
+static const char*
+ __doc_operations_research_Pack_AddWeightedSumOfAssignedDimension =
+ R"doc(This dimension enforces that cost_var == sum of weights[i] for all
+objects 'i' assigned to a bin.)doc";
+
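+// Usage sketch for the Pack dimensions above (illustrative only; item
+// weights and bin capacities are made-up values):
+//
+//   operations_research::Solver solver("pack_example");
+//   std::vector<operations_research::IntVar*> items;
+//   solver.MakeIntVarArray(4, 0, 1, "item", &items);  // 4 items, bins {0, 1}.
+//   operations_research::Pack* const pack = solver.MakePack(items, 2);
+//   // Each bin b must satisfy sum(weights[i] for items i in b) <= capacity[b].
+//   pack->AddWeightedSumLessOrEqualConstantDimension({3, 5, 2, 4}, {7, 7});
+//   solver.AddConstraint(pack);
+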
+static const char* __doc_operations_research_Pack_Assign = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_AssignAllPossibleToBin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_AssignAllRemainingItems =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_AssignFirstPossibleToBin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_AssignVar = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_ClearAll = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_DebugString = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_InitialPropagate =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_IsAssignedStatusKnown =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_IsInProcess = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_IsPossible = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_IsUndecided = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_OneDomain = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_Pack = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_Post = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_Propagate = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_PropagateDelayed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_RemoveAllPossibleFromBin =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_SetAssigned = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_SetImpossible = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_SetUnassigned = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_UnassignAllRemainingItems =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Pack_bins = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_demon = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_dims = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_forced = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_holes = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_in_process = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_removed = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_stamp = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_to_set = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_to_unset = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_unprocessed = R"doc()doc";
+
+static const char* __doc_operations_research_Pack_vars = R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_Accept =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ProfiledDecisionBuilder_AppendMonitors =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ProfiledDecisionBuilder_DebugString = R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_Next =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_ProfiledDecisionBuilder_ProfiledDecisionBuilder =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_db =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_name =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_name_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_seconds =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_seconds_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_ProfiledDecisionBuilder_timer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject =
+ R"doc(The PropagationBaseObject is a subclass of BaseObject that is also
+friend to the Solver class. It allows accessing methods useful when
+writing new constraints or new expressions.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_2 =
+ R"doc(The PropagationBaseObject is a subclass of BaseObject that is also
+friend to the Solver class. It allows accessing methods useful when
+writing new constraints or new expressions.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_BaseName =
+ R"doc(Returns a base name for automatic naming.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_EnqueueAll =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_EnqueueDelayedDemon =
+ R"doc(This method pushes the demon onto the propagation queue. It will be
+processed directly if the queue is empty. It will be enqueued
+according to its priority otherwise.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_EnqueueVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_ExecuteAll =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_FreezeQueue =
+ R"doc(This method freezes the propagation queue. It is useful when you need
+to apply multiple modifications at once.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_HasName =
+ R"doc(Returns whether the object has been named or not.)doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_PropagationBaseObject =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_PropagationBaseObject_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_UnfreezeQueue =
+ R"doc(This method unfreezes the propagation queue. All modifications that
+happened when the queue was frozen will be processed.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_name =
+ R"doc(Object naming.)doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_operator_assign =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_reset_action_on_fail =
+ R"doc(This method clears the failure callback.)doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_set_action_on_fail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_set_name =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_PropagationBaseObject_set_variable_to_clean_on_fail =
+ R"doc(Shortcut for variable cleaner.)doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_solver =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationBaseObject_solver_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_PropagationMonitor = R"doc()doc";
+
+static const char* __doc_operations_research_Queue = R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit =
+ R"doc(Usual limit based on wall_time, number of explored branches and number
+of failures in the search tree)doc";
+
+static const char* __doc_operations_research_RegularLimit_2 =
+ R"doc(Usual limit based on wall_time, number of explored branches and number
+of failures in the search tree)doc";
+
+static const char* __doc_operations_research_RegularLimitParameters =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_RegularLimit_AbsoluteSolverDeadline = R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_Accept = R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_CheckTime =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_CheckWithOffset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_Copy = R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_ExitSearch =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_GetPercent =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_Init = R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_Install = R"doc()doc";
+
+static const char*
+ __doc_operations_research_RegularLimit_IsUncheckedSolutionLimitReached =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_MakeClone =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_MakeIdenticalClone =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_ProgressPercent =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_RegularLimit =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_TimeElapsed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_UpdateLimits =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_branches =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_branches_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_branches_offset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_check_count =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_cumulative =
+ R"doc(If cumulative if false, then the limit applies to each search
+independently. If it's true, the limit applies globally to all search
+for which this monitor is used. When cumulative is true, the offset
+fields have two different meanings depending on context: - within a
+search, it's an offset to be subtracted from the current value -
+outside of search, it's the amount consumed in previous searches)doc";
+
+static const char* __doc_operations_research_RegularLimit_duration_limit =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_duration_limit_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_failures =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_failures_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_failures_offset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_last_time_elapsed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_next_check =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_smart_time_check =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_solutions =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_solutions_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_solutions_offset =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_RegularLimit_solver_time_at_limit_start =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RegularLimit_wall_time =
+ R"doc()doc";
+
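+// Usage sketch for RegularLimit (illustrative only; 'solver' and 'db' are an
+// assumed existing Solver and DecisionBuilder*; MakeTimeLimit is assumed to
+// take an absl::Duration, as in recent releases):
+//
+//   operations_research::RegularLimit* const limit =
+//       solver.MakeTimeLimit(absl::Seconds(30));
+//   solver.Solve(db, limit);
+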
+static const char* __doc_operations_research_Rev =
+ R"doc(This class adds reversibility to a POD type. It contains the stamp
+optimization, i.e. the SaveValue call is done only once per node of
+the search tree. Please note that actual stamps always start at 1,
+thus an initial value of 0 will always trigger the first SaveValue.)doc";
+
+static const char* __doc_operations_research_RevArray =
+ R"doc(Reversible array of POD types. It contains the stamp optimization,
+i.e. the SaveValue call is done only once per node of the search
+tree. Please note that actual stamps always start at 1, thus an
+initial value of 0 always triggers the first SaveValue.)doc";
+
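+// Usage sketch for Rev<T> (illustrative only; typically used inside a
+// constraint, where solver() returns the owning Solver*):
+//
+//   operations_research::Rev<int64_t> count(0);
+//   // The old value is saved at most once per node of the search tree and
+//   // is restored automatically on backtrack.
+//   count.SetValue(solver(), count.Value() + 1);
+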
+static const char* __doc_operations_research_RevArray_RevArray = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_SetValue = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_Value = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_operator_array =
+ R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_size = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_size_2 = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_stamps = R"doc()doc";
+
+static const char* __doc_operations_research_RevArray_values = R"doc()doc";
+
+static const char* __doc_operations_research_RevBitMatrix = R"doc()doc";
+
+static const char* __doc_operations_research_Rev_Rev = R"doc()doc";
+
+static const char* __doc_operations_research_Rev_SetValue = R"doc()doc";
+
+static const char* __doc_operations_research_Rev_Value = R"doc()doc";
+
+static const char* __doc_operations_research_Rev_stamp = R"doc()doc";
+
+static const char* __doc_operations_research_Rev_value = R"doc()doc";
+
+static const char* __doc_operations_research_Search = R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit =
+ R"doc(Base class of all search limits.)doc";
+
+static const char* __doc_operations_research_SearchLimit_2 =
+ R"doc(Base class of all search limits.)doc";
+
+static const char* __doc_operations_research_SearchLimit_BeginNextDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_Check =
+ R"doc(This method is called to check the status of the limit. A return value
+of true indicates that we have indeed crossed the limit. In that case,
+this method will not be called again and the remaining search will be
+discarded.)doc";
+
+static const char* __doc_operations_research_SearchLimit_CheckWithOffset =
+ R"doc(Same as Check() but adds the 'offset' value to the current time when
+time is considered in the limit.)doc";
+
+static const char* __doc_operations_research_SearchLimit_Copy =
+ R"doc(Copy a limit. Warning: leads to a direct (no check) downcasting of
+'limit' so one needs to be sure both SearchLimits are of the same
+type.)doc";
+
+static const char* __doc_operations_research_SearchLimit_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_EnterSearch =
+ R"doc(Internal methods.)doc";
+
+static const char* __doc_operations_research_SearchLimit_Init =
+ R"doc(This method is called when the search limit is initialized.)doc";
+
+static const char* __doc_operations_research_SearchLimit_Install = R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_MakeClone =
+ R"doc(Allocates a clone of the limit.)doc";
+
+static const char* __doc_operations_research_SearchLimit_PeriodicCheck =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_RefuteDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_SearchLimit =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_SearchLimit_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_TopPeriodicCheck =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_crossed =
+ R"doc(Returns true if the limit has been crossed.)doc";
+
+static const char* __doc_operations_research_SearchLimit_crossed_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchLimit_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor =
+ R"doc(A search monitor is a simple set of callbacks to monitor all search
+events)doc";
+
+static const char* __doc_operations_research_SearchMonitor_2 =
+ R"doc(A search monitor is a simple set of callbacks to monitor all search
+events)doc";
+
+static const char* __doc_operations_research_SearchMonitor_Accept =
+ R"doc(Accepts the given model visitor.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_AcceptDelta =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_AcceptNeighbor =
+ R"doc(After accepting a neighbor during local search.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_AcceptSolution =
+ R"doc(This method is called when a solution is found. It asserts whether the
+solution is valid. A value of false indicates that the solution should
+be discarded.)doc";
+
+static const char*
+ __doc_operations_research_SearchMonitor_AcceptUncheckedNeighbor =
+ R"doc(After accepting an unchecked neighbor during local search.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_AfterDecision =
+ R"doc(Just after refuting or applying the decision, apply is true after
+Apply. This is called only if the Apply() or Refute() methods have not
+failed.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_ApplyDecision =
+ R"doc(Before applying the decision.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_AtSolution =
+ R"doc(This method is called when a valid solution is found. If the return
+value is true, then search will resume after. If the result is false,
+then search will stop there.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_BeginFail =
+ R"doc(Just when the failure occurs.)doc";
+
+static const char*
+ __doc_operations_research_SearchMonitor_BeginInitialPropagation =
+ R"doc(Before the initial propagation.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_BeginNextDecision =
+ R"doc(Before calling DecisionBuilder::Next.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_EndFail =
+ R"doc(After completing the backtrack.)doc";
+
+static const char*
+ __doc_operations_research_SearchMonitor_EndInitialPropagation =
+ R"doc(After the initial propagation.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_EndNextDecision =
+ R"doc(After calling DecisionBuilder::Next, along with the returned decision.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_EnterSearch =
+ R"doc(Beginning of the search.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_ExitSearch =
+ R"doc(End of the search.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_Install =
+ R"doc(Registers itself on the solver such that it gets notified of the
+search and propagation events. Override to incrementally install
+listeners for specific events.)doc";
+
+static const char*
+ __doc_operations_research_SearchMonitor_IsUncheckedSolutionLimitReached =
+ R"doc(Returns true if the limit of solutions has been reached including
+unchecked solutions.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_ListenToEvent =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_LocalOptimum =
+ R"doc(When a local optimum is reached. If 'true' is returned, the last
+solution is discarded and the search proceeds with the next one.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_NoMoreSolutions =
+ R"doc(When the search tree is finished.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_PeriodicCheck =
+ R"doc(Periodic call to check limits in long running methods.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_ProgressPercent =
+ R"doc(Returns a percentage representing the propress of the search before
+reaching limits.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_RefuteDecision =
+ R"doc(Before refuting the decision.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_RestartSearch =
+ R"doc(Restart the search.)doc";
+
+static const char* __doc_operations_research_SearchMonitor_SearchMonitor =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_SearchMonitor_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_solver = R"doc()doc";
+
+static const char* __doc_operations_research_SearchMonitor_solver_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar =
+ R"doc(A sequence variable is a variable whose domain is a set of possible
+orderings of the interval variables. It allows ordering of tasks. It
+has two sets of methods: ComputePossibleFirstsAndLasts(), which
+returns the list of interval variables that can be ranked first or
+last; and RankFirst/RankNotFirst/RankLast/RankNotLast, which can be
+used to create the search decision.)doc";
+
+static const char* __doc_operations_research_SequenceVar_2 =
+ R"doc(A sequence variable is a variable whose domain is a set of possible
+orderings of the interval variables. It allows ordering of tasks. It
+has two sets of methods: ComputePossibleFirstsAndLasts(), which
+returns the list of interval variables that can be ranked first or
+last; and RankFirst/RankNotFirst/RankLast/RankNotLast, which can be
+used to create the search decision.)doc";
+
+static const char* __doc_operations_research_SequenceVarAssignment =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement =
+ R"doc(The SequenceVarElement stores a partial representation of ranked
+interval variables in the underlying sequence variable. This
+representation consists of three vectors:
+- the forward sequence, i.e. the list of interval variables ranked
+  first in the sequence. The first element of the forward sequence is
+  the first interval in the sequence variable.
+- the backward sequence, i.e. the list of interval variables ranked
+  last in the sequence. The first element of the backward sequence is
+  the last interval in the sequence variable.
+- the list of unperformed interval variables.
+Furthermore, if all performed variables are ranked, then by
+convention, the forward_sequence will contain all such variables and
+the backward_sequence will be empty.)doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_BackwardSequence = R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Bound =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_CheckClassInvariants =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Clone =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Copy =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_DebugString =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_ForwardSequence = R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_LoadFromProto =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Reset =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Restore =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_SequenceVarElement =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_SequenceVarElement_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_SetBackwardSequence =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_SetForwardSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_SetSequence =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_SetUnperformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Store =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Unperformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_Var =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_WriteToProto =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_backward_sequence =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVarElement_forward_sequence = R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_operator_eq =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_operator_ne =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_unperformed =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVarElement_var =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_Accept =
+ R"doc(Accepts the given visitor.)doc";
+
+static const char* __doc_operations_research_SequenceVar_ActiveHorizonRange =
+ R"doc(Returns the minimum start min and the maximum end max of all unranked
+interval vars in the sequence.)doc";
+
+static const char*
+ __doc_operations_research_SequenceVar_ComputeBackwardFrontier = R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVar_ComputeForwardFrontier = R"doc()doc";
+
+static const char*
+ __doc_operations_research_SequenceVar_ComputePossibleFirstsAndLasts =
+ R"doc(Computes the set of indices of interval variables that can be ranked
+first in the set of unranked activities.)doc";
+
+static const char* __doc_operations_research_SequenceVar_ComputeStatistics =
+ R"doc(Compute statistics on the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_DurationRange =
+ R"doc(Returns the minimum and maximum duration of combined interval vars in
+the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_FillSequence =
+ R"doc(Clears 'rank_first' and 'rank_last', and fills them with the intervals
+in the order of the ranks. If all variables are ranked, 'rank_first'
+will contain all variables, and 'rank_last' will contain none.
+'unperformed' will contain all such interval variables. rank_first
+and rank_last represent different directions. rank_first[0]
+corresponds to the first interval of the sequence. rank_last[0]
+corresponds to the last interval of the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_HorizonRange =
+ R"doc(Returns the minimum start min and the maximum end max of all interval
+vars in the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_Interval =
+ R"doc(Returns the index_th interval of the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_Next =
+ R"doc(Returns the next of the index_th interval of the sequence.)doc";
+
+static const char* __doc_operations_research_SequenceVar_RankFirst =
+ R"doc(Ranks the index_th interval var first of all unranked interval vars.
+After that, it will no longer be considered unranked.)doc";
+
+static const char* __doc_operations_research_SequenceVar_RankLast =
+ R"doc(Ranks the index_th interval var last of all unranked interval vars.
+After that, it will no longer be considered unranked.)doc";
+
+static const char* __doc_operations_research_SequenceVar_RankNotFirst =
+ R"doc(Indicates that the index_th interval var will not be ranked first of
+all currently unranked interval vars.)doc";
+
+static const char* __doc_operations_research_SequenceVar_RankNotLast =
+ R"doc(Indicates that the index_th interval var will not be ranked last of
+all currently unranked interval vars.)doc";
+
+static const char* __doc_operations_research_SequenceVar_RankSequence =
+ R"doc(Applies the following sequence of ranks: ranks first, then ranks last.
+rank_first and rank_last represent different directions.
+rank_first[0] corresponds to the first interval of the sequence.
+rank_last[0] corresponds to the last interval of the sequence. All
+intervals in the unperformed vector will be marked as such.)doc";
+
+static const char* __doc_operations_research_SequenceVar_SequenceVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_UpdatePrevious =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_intervals =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_nexts = R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_previous = R"doc()doc";
+
+static const char* __doc_operations_research_SequenceVar_size =
+ R"doc(Returns the number of interval vars in the sequence.)doc";
+
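+// Usage sketch for SequenceVar ranking (illustrative only; 'sequences' is an
+// assumed std::vector<SequenceVar*> from a scheduling model):
+//
+//   // Builds a decision builder that ranks the intervals of each sequence.
+//   operations_research::DecisionBuilder* const rank_db = solver.MakePhase(
+//       sequences, operations_research::Solver::SEQUENCE_DEFAULT);
+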
+static const char* __doc_operations_research_SetAssignmentFromAssignment =
+ R"doc(Given a "source_assignment", clears the "target_assignment" and adds
+all IntVars in "target_vars", with the values of the variables set
+according to the corresponding values of "source_vars" in
+"source_assignment". source_vars and target_vars must have the same
+number of elements. The source and target assignments can belong to
+different Solvers.)doc";
+
+static const char* __doc_operations_research_SimpleRevFIFO = R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector =
+ R"doc(This class is the root class of all solution collectors. It implements
+a basic query API to be used independently of the collector used.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_2 =
+ R"doc(This class is the root class of all solution collectors. It implements
+a basic query API to be used independently of the collector used.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add =
+ R"doc(Add API.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_Add_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_AddObjective =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_AddObjectives =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_BackwardSequence =
+ R"doc(This is a shortcut to get the BackwardSequence of 'var' in the nth
+solution. The backward sequence is the list of ranked interval
+variables starting from the end of the sequence.)doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_BuildSolutionDataForCurrentState =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_DebugString =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_DurationValue =
+ R"doc(This is a shortcut to get the DurationValue of 'var' in the nth
+solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_EndValue =
+ R"doc(This is a shortcut to get the EndValue of 'var' in the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_EnterSearch =
+ R"doc(Beginning of the search.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_ForwardSequence =
+ R"doc(This is a shortcut to get the ForwardSequence of 'var' in the nth
+solution. The forward sequence is the list of ranked interval
+variables starting from the start of the sequence.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_FreeSolution =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_Install =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_ObjectiveValueFromIndex =
+ R"doc(Returns the value of the index-th objective of the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_PerformedValue =
+ R"doc(This is a shortcut to get the PerformedValue of 'var' in the nth
+solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_PopSolution =
+ R"doc(Remove and delete the last popped solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_Push =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_PushSolution =
+ R"doc(Push the current state as a new solution.)doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionCollector = R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionCollector_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionCollector_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_SolutionData =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_ObjectiveValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_ObjectiveValueFromIndex =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_branches =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_failures =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_operator_lt =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_solution =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_SolutionData_time = R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_StartValue =
+ R"doc(This is a shortcut to get the StartValue of 'var' in the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_Unperformed =
+ R"doc(This is a shortcut to get the list of unperformed of 'var' in the nth
+solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_Value =
+ R"doc(This is a shortcut to get the Value of 'var' in the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_branches =
+ R"doc(Returns the number of branches when the nth solution was found.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_check_index =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_failures =
+ R"doc(Returns the number of failures encountered at the time of the nth
+solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_has_solution =
+ R"doc(Returns whether any solutions were stored during the search.)doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_last_solution_or_null =
+ R"doc(Returns the last solution if there are any, nullptr otherwise.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_objective_value =
+ R"doc(Returns the objective value of the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_prototype =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_SolutionCollector_recycle_solutions = R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_solution =
+ R"doc(Returns the nth solution.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_solution_count =
+ R"doc(Returns how many solutions were stored during the search.)doc";
+
+static const char* __doc_operations_research_SolutionCollector_solution_data =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_solution_pool =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionCollector_wall_time =
+ R"doc(Returns the wall time in ms for the nth solution.)doc";
+
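+// Usage sketch for SolutionCollector (illustrative only; 'solver', 'x' and
+// 'db' are an assumed existing Solver, IntVar* and DecisionBuilder*):
+//
+//   operations_research::SolutionCollector* const collector =
+//       solver.MakeLastSolutionCollector();
+//   collector->Add(x);
+//   solver.Solve(db, collector);
+//   if (collector->solution_count() > 0) {
+//     const int64_t x_value = collector->Value(0, x);  // x in solution 0.
+//   }
+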
+static const char* __doc_operations_research_SolutionPool =
+ R"doc(This class is used to manage a pool of solutions. It can transform a
+single point local search into a multipoint local search.)doc";
+
+static const char* __doc_operations_research_SolutionPool_2 =
+ R"doc(This class is used to manage a pool of solutions. It can transform a
+single point local search into a multipoint local search.)doc";
+
+static const char* __doc_operations_research_SolutionPool_GetNextSolution =
+ R"doc(This method is called when the local search starts a new neighborhood
+to initialize the default assignment.)doc";
+
+static const char* __doc_operations_research_SolutionPool_Initialize =
+ R"doc(This method is called to initialize the solution pool with the
+assignment from the local search.)doc";
+
+static const char* __doc_operations_research_SolutionPool_RegisterNewSolution =
+ R"doc(This method is called when a new solution has been accepted by the
+local search.)doc";
+
+static const char* __doc_operations_research_SolutionPool_SolutionPool =
+ R"doc()doc";
+
+static const char* __doc_operations_research_SolutionPool_SyncNeeded =
+ R"doc(This method checks if the local solution needs to be updated with an
+external one.)doc";
+
+static const char* __doc_operations_research_Solver =
+ R"doc(Solver Class
+
+A solver represents the main computation engine. It implements the
+entire range of Constraint Programming protocols: - Reversibility -
+Propagation - Search
+
+Usually, Constraint Programming code consists of - the creation of the
+Solver, - the creation of the decision variables of the model, - the
+creation of the constraints of the model and their addition to the
+solver() through the AddConstraint() method, - the creation of the
+main DecisionBuilder class, - the launch of the solve() method with
+the decision builder.
+
+For the time being, Solver is neither MT_SAFE nor MT_HOT.)doc";
+
+static const char* __doc_operations_research_Solver_ABSL_DEPRECATED =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ABSL_DEPRECATED_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Accept =
+ R"doc(Accepts the given model visitor.)doc";
+
+static const char* __doc_operations_research_Solver_ActiveSearch =
+ R"doc(Returns the active search, nullptr outside search.)doc";
+
+static const char* __doc_operations_research_Solver_AddBacktrackAction =
+ R"doc(When SaveValue() is not the best way to go, one can create a
+reversible action that will be called upon backtrack. The "fast"
+parameter indicates whether we need to restore all values saved through
+SaveValue() before calling this method.)doc";
+
+static const char* __doc_operations_research_Solver_AddCastConstraint =
+ R"doc(Adds 'constraint' to the solver and marks it as a cast constraint,
+that is, a constraint created calling Var() on an expression. This is
+used internally.)doc";
+
+static const char* __doc_operations_research_Solver_AddConstraint =
+ R"doc(Adds the constraint 'c' to the model.
+
+After calling this method, and until there is a backtrack that undoes
+the addition, any assignment of variables to values must satisfy the
+given constraint in order to be considered feasible. There are two
+fairly different use cases:
+
+- the most common use case is modeling: the given constraint is really
+part of the problem that the user is trying to solve. In this use
+case, AddConstraint is called outside of search (i.e., with ``state()
+== OUTSIDE_SEARCH``). Most users should only use AddConstraint in this
+way. In this case, the constraint will belong to the model forever: it
+cannot be removed by backtracking.
+
+- a rarer use case is that 'c' is not a real constraint of the model.
+It may be a constraint generated by a branching decision (a constraint
+whose goal is to restrict the search space), a symmetry breaking
+constraint (a constraint that does restrict the search space, but in a
+way that cannot have an impact on the quality of the solutions in the
+subtree), or an inferred constraint that, while having no semantic
+value to the model (it does not restrict the set of solutions), is
+worth having because we believe it may strengthen the propagation. In
+these cases, it happens that the constraint is added during the search
+(i.e., with state() == IN_SEARCH or state() == IN_ROOT_NODE). When a
+constraint is added during a search, it applies only to the subtree of
+the search tree rooted at the current node, and will be automatically
+removed by backtracking.
+
+This method does not take ownership of the constraint. If the
+constraint has been created by any factory method (Solver::MakeXXX),
+it will automatically be deleted. However, power users who implement
+their own constraints should do:
+solver.AddConstraint(solver.RevAlloc(new MyConstraint(...)));)doc";
+
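+// Usage sketch for AddConstraint (illustrative only; a complete, minimal
+// model added outside of search):
+//
+//   operations_research::Solver solver("example");
+//   operations_research::IntVar* const x = solver.MakeIntVar(0, 10, "x");
+//   operations_research::IntVar* const y = solver.MakeIntVar(0, 10, "y");
+//   solver.AddConstraint(solver.MakeEquality(solver.MakeSum(x, y), 10));
+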
+static const char* __doc_operations_research_Solver_AddLocalSearchMonitor =
+ R"doc(Adds the local search monitor to the solver. This is called internally
+when a local search monitor is passed to the Solve() or NewSearch()
+method.)doc";
+
+static const char* __doc_operations_research_Solver_AddPropagationMonitor =
+ R"doc(Adds the propagation monitor to the solver. This is called internally
+when a propagation monitor is passed to the Solve() or NewSearch()
+method.)doc";
+
+static const char* __doc_operations_research_Solver_BacktrackOneLevel =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_BacktrackToSentinel =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_BinaryIntervalRelation =
+ R"doc(This enum is used in Solver::MakeIntervalVarRelation to specify the
+temporal relation between the two intervals t1 and t2.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_ENDS_AFTER_END =
+ R"doc(t1 ends after t2 end, i.e. End(t1) >= End(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_ENDS_AFTER_START =
+ R"doc(t1 ends after t2 start, i.e. End(t1) >= Start(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_ENDS_AT_END =
+ R"doc(t1 ends at t2 end, i.e. End(t1) == End(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_ENDS_AT_START =
+ R"doc(t1 ends at t2 start, i.e. End(t1) == Start(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_STARTS_AFTER_END =
+ R"doc(t1 starts after t2 end, i.e. Start(t1) >= End(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_STARTS_AFTER_START =
+ R"doc(t1 starts after t2 start, i.e. Start(t1) >= Start(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_STARTS_AT_END =
+ R"doc(t1 starts at t2 end, i.e. Start(t1) == End(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_STARTS_AT_START =
+ R"doc(t1 starts at t2 start, i.e. Start(t1) == Start(t2) + delay.)doc";
+
+static const char*
+ __doc_operations_research_Solver_BinaryIntervalRelation_STAYS_IN_SYNC =
+ R"doc(STARTS_AT_START and ENDS_AT_END at the same time. t1 starts at t2
+start, i.e. Start(t1) == Start(t2) + delay. t1 ends at t2 end, i.e.
+End(t1) == End(t2).)doc";
+
+static const char* __doc_operations_research_Solver_Cache =
+ R"doc(Returns the cache of the model.)doc";
+
+static const char* __doc_operations_research_Solver_CastExpression =
+ R"doc(Internal. If the variables is the result of expr->Var(), this method
+returns expr, nullptr otherwise.)doc";
+
+static const char* __doc_operations_research_Solver_CheckAssignment =
+ R"doc(Checks whether the given assignment satisfies all relevant
+constraints.)doc";
+
+static const char* __doc_operations_research_Solver_CheckConstraint =
+ R"doc(Checks whether adding this constraint will lead to an immediate
+failure. It will return false if the model is already inconsistent, or
+if adding the constraint makes it inconsistent.)doc";
+
+static const char* __doc_operations_research_Solver_CheckFail = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ClearLocalSearchState =
+ R"doc(Clears the local search state.)doc";
+
+static const char* __doc_operations_research_Solver_ClearNeighbors =
+ R"doc(Manipulate neighbors count; to be used for testing purposes only.
+TODO(user): Find a workaround to avoid exposing this.)doc";
+
+static const char* __doc_operations_research_Solver_Compose =
+ R"doc(Creates a decision builder which sequentially composes decision
+builders. At each leaf of a decision builder, the next decision
+builder is therefore called. For instance, Compose(db1, db2) will
+result in the following tree:
+
+           db1 tree
+          /   |   \
+         db1 leaves
+        /     |     \
+   db2 tree db2 tree db2 tree)doc";
+
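+// A short sketch of Compose, assuming xs and ys are existing
+// std::vector<IntVar*> and solver is an operations_research::Solver
+// (illustration only):
+//
+//   DecisionBuilder* const db1 = solver.MakePhase(
+//       xs, Solver::CHOOSE_FIRST_UNBOUND, Solver::ASSIGN_MIN_VALUE);
+//   DecisionBuilder* const db2 = solver.MakePhase(
+//       ys, Solver::CHOOSE_MIN_SIZE_LOWEST_MIN, Solver::ASSIGN_MIN_VALUE);
+//   // db2 is explored from every leaf of the db1 tree.
+//   solver.Solve(solver.Compose(db1, db2));
+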
+static const char* __doc_operations_research_Solver_Compose_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Compose_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Compose_4 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ConcatenateOperators =
+ R"doc(Creates a local search operator which concatenates a vector of
+operators. Each operator from the vector is called sequentially. By
+default, when a neighbor is found the neighborhood exploration
+restarts from the last active operator (the one which produced the
+neighbor). This can be overridden by setting restart to true to force
+the exploration to start from the first operator in the vector.
+
+The default behavior can also be overridden using an evaluation
+callback to set the order in which the operators are explored (the
+callback is called in LocalSearchOperator::Start()). The first
+argument of the callback is the index of the operator which produced
+the last move, the second argument is the index of the operator to be
+evaluated. Ownership of the callback is taken by ConcatenateOperators.
+
+Example:
+
+const int kPriorities[] = {10, 100, 10, 0};
+int64_t Evaluate(int active_operator, int current_operator) {
+  return kPriorities[current_operator];
+}
+
+LocalSearchOperator* concat = solver.ConcatenateOperators(operators,
+NewPermanentCallback(&Evaluate));
+
+The elements of the vector operators will be sorted by increasing
+priority and explored in that order (tie-breaks are handled by keeping
+the relative operator order in the vector). This would result in the
+following order: operators[3], operators[0], operators[2],
+operators[1].)doc";
+
+static const char* __doc_operations_research_Solver_ConcatenateOperators_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ConcatenateOperators_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_CurrentlyInSolve =
+ R"doc(Returns true whether the current search has been created using a
+Solve() call instead of a NewSearch one. It returns false if the
+solver is not in search at all.)doc";
+
+static const char* __doc_operations_research_Solver_DebugString =
+ R"doc(misc debug string.)doc";
+
+static const char* __doc_operations_research_Solver_DecisionModification =
+ R"doc(The Solver is responsible for creating the search tree. Thanks to the
+DecisionBuilder, it creates a new decision with two branches at each
+node: left and right. The DecisionModification enum is used to specify
+how the branch selector should behave.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DecisionModification_KEEP_LEFT =
+ R"doc(Right branches are ignored. This is used to make the code faster when
+backtrack makes no sense or is not useful. This is faster as there is
+no need to create one new node per decision.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DecisionModification_KEEP_RIGHT =
+ R"doc(Left branches are ignored. This is used to make the code faster when
+backtrack makes no sense or is not useful. This is faster as there is
+no need to create one new node per decision.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DecisionModification_KILL_BOTH =
+ R"doc(Backtracks to the previous decisions, i.e. left and right branches are
+not applied.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DecisionModification_NO_CHANGE =
+ R"doc(Keeps the default behavior, i.e. apply left branch first, and then
+right branch in case of backtracking.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DecisionModification_SWITCH_BRANCHES =
+ R"doc(Applies right branch first. Left branch will be applied in case of
+backtracking.)doc";
+
+static const char* __doc_operations_research_Solver_DefaultSolverParameters =
+ R"doc(Create a ConstraintSolverParameters proto with all the default values.)doc";
+
+static const char* __doc_operations_research_Solver_DemonPriority =
+ R"doc(This enum represents the three possible priorities for a demon in the
+Solver queue. Note: this is for advanced users only.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DemonPriority_DELAYED_PRIORITY =
+ R"doc(DELAYED_PRIORITY is the lowest priority: Demons will be processed
+after VAR_PRIORITY and NORMAL_PRIORITY demons.)doc";
+
+static const char*
+ __doc_operations_research_Solver_DemonPriority_NORMAL_PRIORITY =
+ R"doc(NORMAL_PRIORITY is the highest priority: Demons will be processed
+first.)doc";
+
+static const char* __doc_operations_research_Solver_DemonPriority_VAR_PRIORITY =
+ R"doc(VAR_PRIORITY is between DELAYED_PRIORITY and NORMAL_PRIORITY.)doc";
+
+static const char* __doc_operations_research_Solver_EndSearch = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_EnqueueAll = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_EnqueueDelayedDemon =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_EnqueueVar = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_EvaluatorLocalSearchOperators =
+ R"doc(This enum is used in Solver::MakeOperator associated with an evaluator
+to specify the neighborhood to create.)doc";
+
+static const char*
+ __doc_operations_research_Solver_EvaluatorLocalSearchOperators_LK =
+ R"doc(Lin-Kernighan local search. While the accumulated local gain is
+positive, perform a 2opt or a 3opt move followed by a series of 2opt
+moves. Return a neighbor for which the global gain is positive.)doc";
+
+static const char*
+ __doc_operations_research_Solver_EvaluatorLocalSearchOperators_TSPLNS =
+ R"doc(TSP-base LNS. Randomly merge consecutive nodes until n "meta"-nodes
+remain and solve the corresponding TSP. This is an "unlimited"
+neighborhood which must be stopped by search limits. To force
+diversification, the operator iteratively forces each node to serve as
+base of a meta-node.)doc";
+
+static const char*
+ __doc_operations_research_Solver_EvaluatorLocalSearchOperators_TSPOPT =
+ R"doc(Sliding TSP operator. Uses an exact dynamic programming algorithm to
+solve the TSP corresponding to path sub-chains. For a subchain 1 -> 2
+-> 3 -> 4 -> 5 -> 6, solves the TSP on nodes A, 2, 3, 4, 5, where A is
+a merger of nodes 1 and 6 such that cost(A,i) = cost(1,i) and
+cost(i,A) = cost(i,6).)doc";
+
+static const char* __doc_operations_research_Solver_EvaluatorStrategy =
+ R"doc(This enum is used by Solver::MakePhase to specify how to select
+variables and values during the search. In Solver::MakePhase(const
+std::vector<IntVar*>&, IntVarStrategy, IntValueStrategy), variables
+are selected first, and then the associated value. In
+Solver::MakePhase(const std::vector<IntVar*>& vars, IndexEvaluator2,
+EvaluatorStrategy), the selection is done scanning every <variable,
+value> pair. The next selected pair is then the best among all
+possibilities, i.e. the pair with the smallest evaluation.
+As this is costly, two options are offered: static or dynamic
+evaluation.)doc";
+
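+// A sketch of an evaluator-based phase, assuming vars is an existing
+// std::vector<IntVar*> and solver an operations_research::Solver
+// (illustration only):
+//
+//   // Pick the <variable, value> pair with the smallest evaluation,
+//   // re-evaluated at every selection (the dynamic strategy).
+//   Solver::IndexEvaluator2 evaluator = [](int64_t /*var_index*/, int64_t value) {
+//     return value;  // prefer small values, regardless of the variable
+//   };
+//   DecisionBuilder* const db = solver.MakePhase(
+//       vars, evaluator, Solver::CHOOSE_DYNAMIC_GLOBAL_BEST);
+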
+static const char*
+ __doc_operations_research_Solver_EvaluatorStrategy_CHOOSE_DYNAMIC_GLOBAL_BEST =
+ R"doc(Pairs are compared each time a variable is selected. That way all
+pairs are relevant and evaluation is accurate. This strategy runs in
+O(number-of-pairs) at each variable selection, versus O(1) in the
+static version.)doc";
+
+static const char*
+ __doc_operations_research_Solver_EvaluatorStrategy_CHOOSE_STATIC_GLOBAL_BEST =
+ R"doc(Pairs are compared at the first call of the selector, and results are
+cached. Next calls to the selector use the previous computation, and
+so are not up-to-date, e.g. some <variable, value> pairs may not be
+possible anymore due to propagation since the first call.)doc";
+
+static const char* __doc_operations_research_Solver_ExecuteAll = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ExportProfilingOverview =
+ R"doc(Exports the profiling information in a human readable overview. The
+parameter profile_level used to create the solver must be set to true.)doc";
+
+static const char* __doc_operations_research_Solver_Fail =
+ R"doc(Abandon the current branch in the search tree. A backtrack will
+follow.)doc";
+
+static const char* __doc_operations_research_Solver_FinishCurrentSearch =
+ R"doc(Tells the solver to kill or restart the current search.)doc";
+
+static const char* __doc_operations_research_Solver_FreezeQueue = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_GetConstraintSolverStatistics =
+ R"doc(Returns detailed cp search statistics.)doc";
+
+static const char* __doc_operations_research_Solver_GetLocalSearchMonitor =
+ R"doc(Returns the local search monitor.)doc";
+
+static const char* __doc_operations_research_Solver_GetLocalSearchStatistics =
+ R"doc(Returns detailed local search statistics.)doc";
+
+static const char* __doc_operations_research_Solver_GetName = R"doc(Naming)doc";
+
+static const char* __doc_operations_research_Solver_GetNewIntVarIndex =
+ R"doc(Variable indexing (note that indexing is not reversible). Returns a
+new index for an IntVar.)doc";
+
+static const char*
+ __doc_operations_research_Solver_GetOrCreateLocalSearchState =
+ R"doc(Returns (or creates) an assignment representing the state of local
+search.)doc";
+
+static const char* __doc_operations_research_Solver_GetPropagationMonitor =
+ R"doc(Returns the propagation monitor.)doc";
+
+static const char* __doc_operations_research_Solver_HasName =
+ R"doc(Returns whether the object has been named or not.)doc";
+
+static const char* __doc_operations_research_Solver_ImprovementSearchLimit =
+ R"doc(Limits the search based on the improvements of 'objective_var'. Stops
+the search when the improvement rate gets lower than a threshold
+value. This threshold value is computed based on the improvement rate
+during the first phase of the search.)doc";
+
+static const char* __doc_operations_research_Solver_IncrementNeighbors =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_IncrementUncheckedSolutionCounter =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Init = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InitCachedConstraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InitCachedIntConstants =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InstrumentsDemons =
+ R"doc(Returns whether we are instrumenting demons.)doc";
+
+static const char* __doc_operations_research_Solver_InstrumentsVariables =
+ R"doc(Returns whether we are tracing variables.)doc";
+
+static const char* __doc_operations_research_Solver_IntValueStrategy =
+ R"doc(This enum describes the strategy used to select the next variable
+value to set.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_ASSIGN_CENTER_VALUE =
+ R"doc(Selects the first possible value which is the closest to the center of
+the domain of the selected variable. The center is defined as (min +
+max) / 2.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_ASSIGN_MAX_VALUE =
+ R"doc(Selects the max value of the selected variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_ASSIGN_MIN_VALUE =
+ R"doc(Selects the min value of the selected variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_ASSIGN_RANDOM_VALUE =
+ R"doc(Selects randomly one of the possible values of the selected variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_INT_VALUE_DEFAULT =
+ R"doc(The default behavior is ASSIGN_MIN_VALUE.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_INT_VALUE_SIMPLE =
+ R"doc(The simple selection is ASSIGN_MIN_VALUE.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_SPLIT_LOWER_HALF =
+ R"doc(Split the domain in two around the center, and choose the lower part
+first.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntValueStrategy_SPLIT_UPPER_HALF =
+ R"doc(Split the domain in two around the center, and choose the lower part
+first.)doc";
+
+static const char* __doc_operations_research_Solver_IntVarStrategy =
+ R"doc(This enum describes the strategy used to select the next branching
+variable at each node during the search.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_FIRST_UNBOUND =
+ R"doc(Select the first unbound variable. Variables are considered in the
+order of the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_HIGHEST_MAX =
+ R"doc(Among unbound variables, select the variable with the highest maximal
+value. In case of a tie, the first one is selected, first being
+defined by the order in the vector of IntVars used to create the
+selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_LOWEST_MIN =
+ R"doc(Among unbound variables, select the variable with the smallest minimal
+value. In case of a tie, the first one is selected, "first" defined by
+the order in the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MAX_REGRET_ON_MIN =
+ R"doc(Among unbound variables, select the variable with the largest gap
+between the first and the second values of the domain.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MAX_SIZE =
+ R"doc(Among unbound variables, select the variable with the highest size. In
+case of a tie, the first one is selected, first being defined by the
+order in the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MIN_SIZE =
+ R"doc(Among unbound variables, select the variable with the smallest size.
+In case of a tie, the first one is selected, first being defined by
+the order in the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MIN_SIZE_HIGHEST_MAX =
+ R"doc(Among unbound variables, select the variable with the smallest size,
+i.e., the smallest number of possible values. In case of a tie, the
+selected variable is the one with the highest max value. In case of a
+tie, the first one is selected, first being defined by the order in
+the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MIN_SIZE_HIGHEST_MIN =
+ R"doc(Among unbound variables, select the variable with the smallest size,
+i.e., the smallest number of possible values. In case of a tie, the
+selected variable is the one with the highest min value. In case of a
+tie, the first one is selected, first being defined by the order in
+the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MIN_SIZE_LOWEST_MAX =
+ R"doc(Among unbound variables, select the variable with the smallest size,
+i.e., the smallest number of possible values. In case of a tie, the
+selected variable is the one with the lowest max value. In case of a
+tie, the first one is selected, first being defined by the order in
+the vector of IntVars used to create the selector.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_MIN_SIZE_LOWEST_MIN =
+ R"doc(Among unbound variables, select the variable with the smallest size,
+i.e., the smallest number of possible values. In case of a tie, the
+selected variable is the one with the lowest min value. In case of a
+tie, the first one is selected, first being defined by the order in
+the vector of IntVars used to create the selector.)doc";
+
+static const char* __doc_operations_research_Solver_IntVarStrategy_CHOOSE_PATH =
+ R"doc(Selects the next unbound variable on a path, the path being defined by
+the variables: var[i] corresponds to the index of the next of i.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_CHOOSE_RANDOM =
+ R"doc(Randomly select one of the remaining unbound variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_INT_VAR_DEFAULT =
+ R"doc(The default behavior is CHOOSE_FIRST_UNBOUND.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntVarStrategy_INT_VAR_SIMPLE =
+ R"doc(The simple selection is CHOOSE_FIRST_UNBOUND.)doc";
+
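+// A sketch combining a variable strategy and a value strategy in MakePhase,
+// assuming vars is an existing std::vector<IntVar*> (illustration only):
+//
+//   // Branch on the smallest-domain variable and try its minimum value first.
+//   DecisionBuilder* const db = solver.MakePhase(
+//       vars, Solver::CHOOSE_MIN_SIZE_LOWEST_MIN, Solver::ASSIGN_MIN_VALUE);
+//   solver.NewSearch(db);
+//   while (solver.NextSolution()) {
+//     // Read solution values from vars here.
+//   }
+//   solver.EndSearch();
+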
+static const char* __doc_operations_research_Solver_IntegerCastInfo =
+ R"doc(Holds semantic information stating that the 'expression' has been cast
+into 'variable' using the Var() method, and that 'maintainer' is
+responsible for maintaining the equality between 'variable' and
+'expression'.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntegerCastInfo_IntegerCastInfo =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_IntegerCastInfo_IntegerCastInfo_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_IntegerCastInfo_expression =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_IntegerCastInfo_maintainer =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_IntegerCastInfo_variable =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_InternalSaveValue_7 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_IntervalStrategy =
+ R"doc(This enum describes the straregy used to select the next interval
+variable and its value to be fixed.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntervalStrategy_INTERVAL_DEFAULT =
+ R"doc(The default is INTERVAL_SET_TIMES_FORWARD.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntervalStrategy_INTERVAL_SET_TIMES_BACKWARD =
+ R"doc(Selects the variable with the highest ending time of all variables,
+and fixes the ending time to this highest value.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntervalStrategy_INTERVAL_SET_TIMES_FORWARD =
+ R"doc(Selects the variable with the lowest starting time of all variables,
+and fixes its starting time to this lowest value.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IntervalStrategy_INTERVAL_SIMPLE =
+ R"doc(The simple is INTERVAL_SET_TIMES_FORWARD.)doc";
+
+static const char* __doc_operations_research_Solver_IsADifference =
+ R"doc(Internal.)doc";
+
+static const char* __doc_operations_research_Solver_IsBooleanVar =
+ R"doc(Returns true if expr represents either boolean_var or 1 - boolean_var.
+In that case, it fills inner_var, and sets is_negated to true if the
+expression is 1 - boolean_var -- equivalent to not(boolean_var).)doc";
+
+static const char*
+ __doc_operations_research_Solver_IsLocalSearchProfilingEnabled =
+ R"doc(Returns whether we are profiling local search.)doc";
+
+static const char* __doc_operations_research_Solver_IsProduct =
+ R"doc(Returns true if expr represents a product of a expr and a constant. In
+that case, it fills inner_expr and coefficient with these, and returns
+true. In the other case, it fills inner_expr with expr, coefficient
+with 1, and returns false.)doc";
+
+static const char* __doc_operations_research_Solver_IsProfilingEnabled =
+ R"doc(Returns whether we are profiling the solver.)doc";
+
+static const char*
+ __doc_operations_research_Solver_IsUncheckedSolutionLimitReached =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_JumpToSentinel =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_JumpToSentinelWhenNested =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchFilterBound =
+ R"doc(This enum is used in Solver::MakeLocalSearchObjectiveFilter. It
+specifies the behavior of the objective filter to create. The goal is
+to define under which condition a move is accepted based on the
+current objective value.)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchFilterBound_EQ =
+ R"doc(Move is accepted when the current objective value is in the interval
+objective.Min .. objective.Max.)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchFilterBound_GE =
+ R"doc(Move is accepted when the current objective value >= objective.Min.)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchFilterBound_LE =
+ R"doc(Move is accepted when the current objective value <= objective.Max.)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchOperators =
+ R"doc(This enum is used in Solver::MakeOperator to specify the neighborhood
+to create.)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchOperators_CROSS =
+ R"doc(Operator which cross exchanges the starting chains of 2 paths,
+including exchanging the whole paths. First and last nodes are not
+moved. Possible neighbors for the paths 1 -> 2 -> 3 -> 4 -> 5 and
+6 -> 7 -> 8 (where (1, 5) and (6, 8) are first and last nodes of the
+paths and can therefore not be moved):
+1 -> [7] -> 3 -> 4 -> 5  and  6 -> [2] -> 8
+1 -> [7] -> 4 -> 5       and  6 -> [2 -> 3] -> 8
+1 -> [7] -> 5            and  6 -> [2 -> 3 -> 4] -> 8)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_DECREMENT =
+ R"doc(Operator which defines a neighborhood to decrement values. The
+behavior is the same as INCREMENT, except values are decremented
+instead of incremented.)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_EXCHANGE =
+ R"doc(Operator which exchanges the positions of two nodes. Possible
+neighbors for the path 1 -> 2 -> 3 -> 4 -> 5 (where (1, 5) are first
+and last nodes of the path and can therefore not be moved):
+1 -> [3] -> [2] -> 4 -> 5
+1 -> [4] -> 3 -> [2] -> 5
+1 -> 2 -> [4] -> [3] -> 5)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_EXTENDEDSWAPACTIVE =
+ R"doc(Operator which makes an inactive node active and an active one
+inactive. It is similar to SwapActiveOperator except that it tries to
+insert the inactive node in all possible positions instead of just the
+position of the node made inactive. Possible neighbors for the path
+1 -> 2 -> 3 -> 4 with 5 inactive (where 1 and 4 are first and last
+nodes of the path) are:
+1 -> [5] -> 3 -> 4 with 2 inactive
+1 -> 3 -> [5] -> 4 with 2 inactive
+1 -> [5] -> 2 -> 4 with 3 inactive
+1 -> 2 -> [5] -> 4 with 3 inactive)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_FULLPATHLNS =
+ R"doc(Operator which relaxes one entire path and all inactive nodes, thus
+defining num_paths neighbors.)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_INCREMENT =
+ R"doc(Operator which defines one neighbor per variable. Each neighbor tries
+to increment by one the value of the corresponding variable. When a
+new solution is found the neighborhood is rebuilt from scratch, i.e.,
+tries to increment values in the variable order. Consider for instance
+variables x and y. x is incremented one by one to its max, and when it
+is not possible to increment x anymore, y is incremented once. If this
+is a solution, then next neighbor tries to increment x.)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_MAKEACTIVE =
+ R"doc(Operator which inserts an inactive node into a path. Possible
+neighbors for the path 1 -> 2 -> 3 -> 4 with 5 inactive (where 1 and 4
+are first and last nodes of the path) are:
+1 -> [5] -> 2 -> 3 -> 4
+1 -> 2 -> [5] -> 3 -> 4
+1 -> 2 -> 3 -> [5] -> 4)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_MAKECHAININACTIVE =
+ R"doc(Operator which makes a "chain" of path nodes inactive. Possible
+neighbors for the path 1 -> 2 -> 3 -> 4 (where 1 and 4 are first and
+last nodes of the path) are:
+1 -> 3 -> 4 with 2 inactive
+1 -> 2 -> 4 with 3 inactive
+1 -> 4 with 2 and 3 inactive)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_MAKEINACTIVE =
+ R"doc(Operator which makes path nodes inactive. Possible neighbors for the
+path 1 -> 2 -> 3 -> 4 (where 1 and 4 are first and last nodes of the
+path) are:
+1 -> 3 -> 4 with 2 inactive
+1 -> 2 -> 4 with 3 inactive)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchOperators_OROPT =
+ R"doc(Relocate: OROPT and RELOCATE. Operator which moves a sub-chain of a
+path to another position; the specified chain length is the fixed
+length of the chains being moved. When this length is 1, the operator
+simply moves a node to another position. Possible neighbors for the
+path 1 -> 2 -> 3 -> 4 -> 5, for a chain length of 2 (where (1, 5) are
+first and last nodes of the path and can therefore not be moved):
+1 -> 4 -> [2 -> 3] -> 5
+1 -> [3 -> 4] -> 2 -> 5
+
+Using Relocate with chain lengths of 1, 2 and 3 together is equivalent
+to the OrOpt operator on a path. The OrOpt operator is a limited
+version of 3Opt (breaks 3 arcs on a path).)doc";
+
+static const char* __doc_operations_research_Solver_LocalSearchOperators_PATHLNS =
+ R"doc(Operator which relaxes two sub-chains of three consecutive arcs each.
+Each sub-chain is defined by a start node and the next three arcs.
+Those six arcs are relaxed to build a new neighbor. PATHLNS explores
+all possible pairs of starting nodes and so defines n^2 neighbors, n
+being the number of nodes. Note that the two sub-chains can be part of
+the same path; they even may overlap.)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_RELOCATE =
+ R"doc(Relocate neighborhood with length of 1 (see OROPT comment).)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_SIMPLELNS =
+ R"doc(Operator which defines one neighbor per variable. Each neighbor
+relaxes one variable. When a new solution is found the neighborhood is
+rebuilt from scratch. Consider for instance variables x and y. First x
+is relaxed and the solver is looking for the best possible solution
+(with only x relaxed). Then y is relaxed, and the solver is looking
+for a new solution. If a new solution is found, then the next variable
+to be relaxed is x.)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_SWAPACTIVE =
+ R"doc(Operator which replaces an active node by an inactive one. Possible
+neighbors for the path 1 -> 2 -> 3 -> 4 with 5 inactive (where 1 and 4
+are first and last nodes of the path) are:
+1 -> [5] -> 3 -> 4 with 2 inactive
+1 -> 2 -> [5] -> 4 with 3 inactive)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_TWOOPT =
+ R"doc(Operator which reverses a sub-chain of a path. It is called TwoOpt
+because it breaks two arcs on the path; resulting paths are called
+two-optimal. Possible neighbors for the path 1 -> 2 -> 3 -> 4 -> 5
+(where (1, 5) are first and last nodes of the path and can therefore
+not be moved):
+1 -> [3 -> 2] -> 4 -> 5
+1 -> [4 -> 3 -> 2] -> 5
+1 -> 2 -> [4 -> 3] -> 5)doc";
+
+static const char*
+ __doc_operations_research_Solver_LocalSearchOperators_UNACTIVELNS =
+ R"doc(Operator which relaxes all inactive nodes and one sub-chain of six
+consecutive arcs. That way the path can be improved by inserting
+inactive nodes or swapping arcs.)doc";
+
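+// A sketch of building path neighborhoods from this enum with MakeOperator,
+// assuming nexts is an existing std::vector<IntVar*> of "next" variables
+// (illustration only):
+//
+//   LocalSearchOperator* const two_opt =
+//       solver.MakeOperator(nexts, Solver::TWOOPT);
+//   LocalSearchOperator* const or_opt =
+//       solver.MakeOperator(nexts, Solver::OROPT);
+//   LocalSearchOperator* const neighborhood =
+//       solver.ConcatenateOperators({two_opt, or_opt});
+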
+static const char* __doc_operations_research_Solver_LocalSearchProfile =
+ R"doc(Returns local search profiling information in a human readable format.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAbs = R"doc(|expr|)doc";
+
+static const char* __doc_operations_research_Solver_MakeAbsEquality =
+ R"doc(Creates the constraint abs(var) == abs_var.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAcceptFilter =
+ R"doc(Local Search Filters)doc";
+
+static const char* __doc_operations_research_Solver_MakeActionDemon =
+ R"doc(Creates a demon from a callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAllDifferent =
+ R"doc(All variables are pairwise different. This corresponds to the stronger
+version of the propagation algorithm.)doc";
+
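+// A sketch of MakeAllDifferent on an 8-queens style model (illustration
+// only, assuming an existing operations_research::Solver named solver):
+//
+//   std::vector<IntVar*> queens;
+//   solver.MakeIntVarArray(8, 0, 7, "q", &queens);
+//   solver.AddConstraint(solver.MakeAllDifferent(queens));
+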
+static const char* __doc_operations_research_Solver_MakeAllDifferent_2 =
+ R"doc(All variables are pairwise different. If 'stronger_propagation' is
+true, stronger, and potentially slower propagation will occur. This
+API will be deprecated in the future.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAllDifferentExcept =
+ R"doc(All variables are pairwise different, unless they are assigned to the
+escape value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAllSolutionCollector =
+ R"doc(Collect all solutions of the search.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAllSolutionCollector_2 =
+ R"doc(Collect all solutions of the search. The variables will need to be
+added later.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAllowedAssignments =
+ R"doc(This method creates a constraint where the graph of the relation
+between the variables is given in extension. There are 'arity'
+variables involved in the relation and the graph is given by an integer
+tuple set.)doc";
+
+static const char* __doc_operations_research_Solver_MakeApplyBranchSelector =
+ R"doc(Creates a decision builder that will set the branch selector.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAssignVariableValue =
+ R"doc(Decisions.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeAssignVariableValueOrDoNothing =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeAssignVariableValueOrFail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeAssignVariablesValues =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeAssignVariablesValuesOrDoNothing =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeAssignVariablesValuesOrFail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeAssignment =
+ R"doc(This method creates an empty assignment.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAssignment_2 =
+ R"doc(This method creates an assignment which is a copy of 'a'.)doc";
+
+static const char* __doc_operations_research_Solver_MakeAtMost =
+ R"doc(|{i | vars[i] == value}| <= max_count)doc";
+
+static const char* __doc_operations_research_Solver_MakeAtSolutionCallback =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeBestLexicographicValueSolutionCollector =
+ R"doc(Same as above, but supporting lexicographic objectives; 'maximize'
+specifies the optimization direction for each objective in
+'assignment'.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeBestLexicographicValueSolutionCollector_2 =
+ R"doc(Same as above, but supporting lexicographic objectives; 'maximize'
+specifies the optimization direction for each objective.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeBestValueSolutionCollector =
+ R"doc(Collect the solution corresponding to the optimal value of the
+objective of 'assignment'; if 'assignment' does not have an objective
+no solution is collected. This collector only collects one solution
+corresponding to the best objective value (the first one found).)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeBestValueSolutionCollector_2 =
+ R"doc(Collect the solution corresponding to the optimal value of the
+objective of the internal assignment; if this assignment does not have
+an objective no solution is collected. This collector only collects
+one solution corresponding to the best objective value (the first one
+found). The variables and objective(s) will need to be added later.)doc";
+
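+// A sketch of collecting the best solution while minimizing, assuming vars
+// (std::vector<IntVar*>), cost (IntVar*) and db (DecisionBuilder*) already
+// exist (illustration only):
+//
+//   SolutionCollector* const best =
+//       solver.MakeBestValueSolutionCollector(/*maximize=*/false);
+//   best->Add(vars);
+//   best->AddObjective(cost);
+//   OptimizeVar* const objective = solver.MakeMinimize(cost, /*step=*/1);
+//   solver.Solve(db, best, objective);
+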
+static const char* __doc_operations_research_Solver_MakeBetweenCt =
+ R"doc((l <= expr <= u))doc";
+
+static const char* __doc_operations_research_Solver_MakeBoolVar =
+ R"doc(MakeBoolVar will create a variable with a {0, 1} domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeBoolVar_2 =
+ R"doc(MakeBoolVar will create a variable with a {0, 1} domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeBoolVarArray =
+ R"doc(This method will append the vector vars with 'var_count' boolean
+variables having name "name<i>" where <i> is the index of the
+variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeBoolVarArray_2 =
+ R"doc(This method will append the vector vars with 'var_count' boolean
+variables having no names.)doc";
+
+static const char* __doc_operations_research_Solver_MakeBoolVarArray_3 =
+ R"doc(Same but allocates an array and returns it.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCircuit =
+ R"doc(Force the "nexts" variable to create a complete Hamiltonian path.)doc";
+
+static const char* __doc_operations_research_Solver_MakeClosureDemon =
+ R"doc(!defined(SWIG) Creates a demon from a closure.)doc";
+
+static const char* __doc_operations_research_Solver_MakeConditionalExpression =
+ R"doc(Conditional Expr condition ? expr : unperformed_value)doc";
+
+static const char* __doc_operations_research_Solver_MakeConstantRestart =
+ R"doc(This search monitor will restart the search periodically after
+'frequency' failures.)doc";
+
+static const char* __doc_operations_research_Solver_MakeConstraintAdder =
+ R"doc(Returns a decision builder that will add the given constraint to the
+model.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeConstraintInitialPropagateCallback =
+ R"doc(This method is a specialized case of the MakeConstraintDemon method to
+call the InitiatePropagate of the constraint 'ct'.)doc";
+
+static const char* __doc_operations_research_Solver_MakeConvexPiecewiseExpr =
+ R"doc(Convex piecewise function.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCount =
+ R"doc(|{i | vars[i] == value}| == max_count)doc";
+
+static const char* __doc_operations_research_Solver_MakeCount_2 =
+ R"doc(|{i | vars[i] == value}| == max_count)doc";
+
+static const char* __doc_operations_research_Solver_MakeCover =
+ R"doc(This constraint states that the target_var is the convex hull of the
+intervals. If none of the interval variables is performed, then the
+target var is unperformed too. Also, if the target variable is
+unperformed, then all the intervals variables are unperformed too.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative =
+ R"doc(This constraint forces that, for any integer t, the sum of the demands
+corresponding to an interval containing t does not exceed the given
+capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should only contain non-negative values. Zero values are
+supported, and the corresponding intervals are filtered out, as they
+neither impact nor are impacted by this constraint.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative_2 =
+ R"doc(This constraint forces that, for any integer t, the sum of the demands
+corresponding to an interval containing t does not exceed the given
+capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should only contain non-negative values. Zero values are
+supported, and the corresponding intervals are filtered out, as they
+neither impact nor are impacted by this constraint.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative_3 =
+ R"doc(This constraint forces that, for any integer t, the sum of the demands
+corresponding to an interval containing t does not exceed the given
+capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should only contain non-negative values. Zero values are
+supported, and the corresponding intervals are filtered out, as they
+neither impact nor are impacted by this constraint.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative_4 =
+ R"doc(This constraint enforces that, for any integer t, the sum of the
+demands corresponding to an interval containing t does not exceed the
+given capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should only contain non-negative values. Zero values are
+supported, and the corresponding intervals are filtered out, as they
+neither impact nor are impacted by this constraint.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative_5 =
+ R"doc(This constraint enforces that, for any integer t, the sum of demands
+corresponding to an interval containing t does not exceed the given
+capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should be positive.)doc";
+
+static const char* __doc_operations_research_Solver_MakeCumulative_6 =
+ R"doc(This constraint enforces that, for any integer t, the sum of demands
+corresponding to an interval containing t does not exceed the given
+capacity.
+
+Intervals and demands should be vectors of equal size.
+
+Demands should be positive.)doc";
+
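+// A sketch of MakeCumulative on fixed-duration tasks (illustration only,
+// assuming an existing operations_research::Solver named solver):
+//
+//   std::vector<IntervalVar*> tasks;
+//   solver.MakeFixedDurationIntervalVarArray(
+//       3, /*start_min=*/0, /*start_max=*/20, /*duration=*/5,
+//       /*optional=*/false, "task", &tasks);
+//   const std::vector<int64_t> demands = {2, 3, 1};
+//   // At any time t, the running tasks use at most 4 units of capacity.
+//   solver.AddConstraint(
+//       solver.MakeCumulative(tasks, demands, /*capacity=*/4, "cumulative"));
+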
+static const char* __doc_operations_research_Solver_MakeDecision = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeDecisionBuilderFromAssignment =
+ R"doc(Returns a decision builder for which the left-most leaf corresponds to
+assignment, the rest of the tree being explored using 'db'.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDefaultPhase =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeDefaultPhase_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeDefaultRegularLimitParameters =
+ R"doc(Creates a regular limit proto containing default values.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDefaultSolutionPool =
+ R"doc(Solution Pool.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeDelayedConstraintInitialPropagateCallback =
+ R"doc(This method is a specialized case of the MakeConstraintDemon method to
+call the InitiatePropagate of the constraint 'ct' with low priority.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDelayedPathCumul =
+ R"doc(Delayed version of the same constraint: propagation on the nexts
+variables is delayed until all constraints have propagated.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDeviation =
+ R"doc(Deviation constraint: sum_i |n * vars[i] - total_sum| <= deviation_var
+and sum_i vars[i] == total_sum, where n = #vars.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDifference =
+ R"doc(left - right)doc";
+
+static const char* __doc_operations_research_Solver_MakeDifference_2 =
+ R"doc(value - expr)doc";
+
+static const char* __doc_operations_research_Solver_MakeDisjunctiveConstraint =
+ R"doc(This constraint forces all interval vars into an non-overlapping
+sequence. Intervals with zero duration can be scheduled anywhere.)doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute =
+ R"doc(Aggregated version of count: |{i | v[i] == values[j]}| == cards[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_2 =
+ R"doc(Aggregated version of count: |{i | v[i] == values[j]}| == cards[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_3 =
+ R"doc(Aggregated version of count: |{i | v[i] == j}| == cards[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_4 =
+ R"doc(Aggregated version of count with bounded cardinalities: forall j in 0
+.. card_size - 1: card_min <= |{i | v[i] == j}| <= card_max)doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_5 =
+ R"doc(Aggregated version of count with bounded cardinalities: forall j in 0
+.. card_size - 1: card_min[j] <= |{i | v[i] == j}| <= card_max[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_6 =
+ R"doc(Aggregated version of count with bounded cardinalities: forall j in 0
+.. card_size - 1: card_min[j] <= |{i | v[i] == j}| <= card_max[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_7 =
+ R"doc(Aggregated version of count with bounded cardinalities: forall j in 0
+.. card_size - 1: card_min[j] <= |{i | v[i] == values[j]}| <=
+card_max[j])doc";
+
+static const char* __doc_operations_research_Solver_MakeDistribute_8 =
+ R"doc(Aggregated version of count with bounded cardinalities: forall j in 0
+.. card_size - 1: card_min[j] <= |{i | v[i] == values[j]}| <=
+card_max[j])doc";
+
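+// A sketch of the basic MakeDistribute form (illustration only, assuming an
+// existing operations_research::Solver named solver):
+//
+//   std::vector<IntVar*> vars;
+//   solver.MakeIntVarArray(5, 0, 2, "v", &vars);
+//   const std::vector<int64_t> values = {0, 1, 2};
+//   std::vector<IntVar*> cards;
+//   solver.MakeIntVarArray(3, 0, 5, "card", &cards);
+//   // For each j: |{i | vars[i] == values[j]}| == cards[j].
+//   solver.AddConstraint(solver.MakeDistribute(vars, values, cards));
+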
+static const char* __doc_operations_research_Solver_MakeDiv =
+ R"doc(expr / value (integer division))doc";
+
+static const char* __doc_operations_research_Solver_MakeDiv_2 =
+ R"doc(numerator / denominator (integer division). Terms need to be positive.)doc";
+
+static const char* __doc_operations_research_Solver_MakeElement =
+ R"doc(values[index])doc";
+
+static const char* __doc_operations_research_Solver_MakeElement_2 =
+ R"doc(values[index])doc";
+
+static const char* __doc_operations_research_Solver_MakeElement_3 =
+ R"doc(Function-based element. The constraint takes ownership of the
+callback. The callback must be able to cope with any possible value in
+the domain of 'index' (potentially negative ones too).)doc";
+
+static const char* __doc_operations_research_Solver_MakeElement_4 =
+ R"doc(2D version of function-based element expression, values(expr1, expr2).)doc";
+
+static const char* __doc_operations_research_Solver_MakeElement_5 =
+ R"doc(vars[expr])doc";
+
+static const char* __doc_operations_research_Solver_MakeElement_6 =
+ R"doc(vars(argument))doc";
+
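+// A sketch of a constant-array element expression (illustration only,
+// assuming an existing operations_research::Solver named solver):
+//
+//   const std::vector<int64_t> values = {10, 20, 30, 40};
+//   IntVar* const index = solver.MakeIntVar(0, 3, "index");
+//   IntExpr* const element = solver.MakeElement(values, index);  // values[index]
+//   solver.AddConstraint(solver.MakeEquality(element, 30));  // forces index == 2
+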
+static const char* __doc_operations_research_Solver_MakeElementEquality =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeElementEquality_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeElementEquality_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeElementEquality_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeEnterSearchCallback =
+ R"doc(----- Callback-based search monitors -----)doc";
+
+static const char* __doc_operations_research_Solver_MakeEquality =
+ R"doc(left == right)doc";
+
+static const char* __doc_operations_research_Solver_MakeEquality_2 =
+ R"doc(expr == value)doc";
+
+static const char* __doc_operations_research_Solver_MakeEquality_3 =
+ R"doc(expr == value)doc";
+
+static const char* __doc_operations_research_Solver_MakeEquality_4 =
+ R"doc(This constraints states that the two interval variables are equal.)doc";
+
+static const char* __doc_operations_research_Solver_MakeExitSearchCallback =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeFailDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeFalseConstraint =
+ R"doc(This constraint always fails.)doc";
+
+static const char* __doc_operations_research_Solver_MakeFalseConstraint_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeFirstSolutionCollector =
+ R"doc(Collect the first solution of the search.)doc";
+
+static const char* __doc_operations_research_Solver_MakeFirstSolutionCollector_2 =
+ R"doc(Collect the first solution of the search. The variables will need to
+be added later.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationEndSyncedOnEndIntervalVar =
+ R"doc(Creates an interval var with a fixed duration whose end is
+synchronized with the end of another interval, with a given offset.
+The performed status is also in sync with the performed status of the
+given interval variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationEndSyncedOnStartIntervalVar =
+ R"doc(Creates an interval var with a fixed duration whose end is
+synchronized with the start of another interval, with a given offset.
+The performed status is also in sync with the performed status of the
+given interval variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVar =
+ R"doc(Creates an interval var with a fixed duration. The duration must be
+greater than 0. If optional is true, then the interval can be
+performed or unperformed. If optional is false, then the interval is
+always performed.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVar_2 =
+ R"doc(Creates a performed interval var with a fixed duration. The duration
+must be greater than 0.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVar_3 =
+ R"doc(Creates an interval var with a fixed duration, and performed_variable.
+The duration must be greater than 0.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray =
+ R"doc(This method fills the vector with 'count' interval variables built
+with the corresponding parameters.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray_2 =
+ R"doc(This method fills the vector with 'count' interval var built with the
+corresponding start variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray_3 =
+ R"doc(This method fills the vector with interval variables built with the
+corresponding start variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray_4 =
+ R"doc(This method fills the vector with interval variables built with the
+corresponding start variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray_5 =
+ R"doc(This method fills the vector with interval variables built with the
+corresponding start and performed variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationIntervalVarArray_6 =
+ R"doc(This method fills the vector with interval variables built with the
+corresponding start and performed variables.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationStartSyncedOnEndIntervalVar =
+ R"doc(Creates an interval var with a fixed duration whose start is
+synchronized with the end of another interval, with a given offset.
+The performed status is also in sync with the performed status of the
+given interval variable.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeFixedDurationStartSyncedOnStartIntervalVar =
+ R"doc(Creates an interval var with a fixed duration whose start is
+synchronized with the start of another interval, with a given offset.
+The performed status is also in sync with the performed status of the
+given interval variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeFixedInterval =
+ R"doc(Creates a fixed and performed interval.)doc";
+
+static const char* __doc_operations_research_Solver_MakeGenericTabuSearch =
+ R"doc(Creates a Tabu Search based on the vars |vars|. A solution is "tabu"
+if all the vars in |vars| keep their value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreater =
+ R"doc(left > right)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreater_2 =
+ R"doc(expr > value)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreater_3 =
+ R"doc(expr > value)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreaterOrEqual =
+ R"doc(left >= right)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreaterOrEqual_2 =
+ R"doc(expr >= value)doc";
+
+static const char* __doc_operations_research_Solver_MakeGreaterOrEqual_3 =
+ R"doc(expr >= value)doc";
+
+static const char* __doc_operations_research_Solver_MakeGuidedLocalSearch =
+ R"doc(Creates a Guided Local Search monitor. Description here:
+http://en.wikipedia.org/wiki/Guided_Local_Search)doc";
+
+static const char* __doc_operations_research_Solver_MakeGuidedLocalSearch_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeIfThenElseCt =
+ R"doc(Special cases with arrays of size two.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIndexExpression =
+ R"doc(Returns the expression expr such that vars[expr] == value. It assumes
+that vars are all different.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIndexOfConstraint =
+ R"doc(This constraint is a special case of the element constraint with an
+array of integer variables, where the variables are all different and
+the index variable is constrained such that vars[index] == target.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeIndexOfFirstMaxValueConstraint =
+ R"doc(Creates a constraint that binds the index variable to the index of the
+first variable with the maximum value.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeIndexOfFirstMinValueConstraint =
+ R"doc(Creates a constraint that binds the index variable to the index of the
+first variable with the minimum value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntConst =
+ R"doc(IntConst will create a constant expression.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntConst_2 =
+ R"doc(IntConst will create a constant expression.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar =
+ R"doc(MakeIntVar will create the best range based int var for the bounds
+given.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar_2 =
+ R"doc(MakeIntVar will create a variable with the given sparse domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar_3 =
+ R"doc(MakeIntVar will create a variable with the given sparse domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar_4 =
+ R"doc(MakeIntVar will create the best range based int var for the bounds
+given.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar_5 =
+ R"doc(MakeIntVar will create a variable with the given sparse domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVar_6 =
+ R"doc(MakeIntVar will create a variable with the given sparse domain.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVarArray =
+ R"doc(This method will append the vector vars with 'var_count' variables
+having bounds vmin and vmax and having name "name<i>" where <i> is the
+index of the variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVarArray_2 =
+ R"doc(This method will append the vector vars with 'var_count' variables
+having bounds vmin and vmax and having no names.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntVarArray_3 =
+ R"doc(Same but allocates an array and returns it.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalRelaxedMax =
+ R"doc(Creates and returns an interval variable that wraps around the given
+one, relaxing the max start and end. Relaxing means making unbounded
+when optional. If the variable is non-optional, this method returns
+interval_var.
+
+More precisely, such an interval variable behaves as follows: * When
+the underlying must be performed, the returned interval variable
+behaves exactly as the underlying; * When the underlying may or may
+not be performed, the returned interval variable behaves like the
+underlying, except that it is unbounded on the max side; * When the
+underlying cannot be performed, the returned interval variable is of
+duration 0 and must be performed in an interval unbounded on both
+sides.
+
+This is very useful for implementing propagators that may only modify
+the start min or end min.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalRelaxedMin =
+ R"doc(Creates and returns an interval variable that wraps around the given
+one, relaxing the min start and end. Relaxing means making unbounded
+when optional. If the variable is non-optional, this method returns
+interval_var.
+
+More precisely, such an interval variable behaves as follows: * When
+the underlying must be performed, the returned interval variable
+behaves exactly as the underlying; * When the underlying may or may
+not be performed, the returned interval variable behaves like the
+underlying, except that it is unbounded on the min side; * When the
+underlying cannot be performed, the returned interval variable is of
+duration 0 and must be performed in an interval unbounded on both
+sides.
+
+This is very useful to implement propagators that may only modify the
+start max or end max.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalVar =
+ R"doc(Creates an interval var by specifying the bounds on start, duration,
+and end.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalVarArray =
+ R"doc(This method fills the vector with 'count' interval var built with the
+corresponding parameters.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalVarRelation =
+ R"doc(This method creates a relation between an interval var and a date.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIntervalVarRelation_2 =
+ R"doc(This method creates a relation between two interval vars.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeIntervalVarRelationWithDelay =
+ R"doc(This method creates a relation between two interval vars. The given
+delay is added to the second interval. i.e.: t1 STARTS_AFTER_END of t2
+with a delay of 2 means t1 will start at least two units of time after
+the end of t2.)doc";
+
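+// A sketch of a delayed temporal relation between two intervals (illustration
+// only, assuming an existing operations_research::Solver named solver):
+//
+//   IntervalVar* const t1 = solver.MakeFixedDurationIntervalVar(
+//       0, 100, 5, /*optional=*/false, "t1");
+//   IntervalVar* const t2 = solver.MakeFixedDurationIntervalVar(
+//       0, 100, 5, /*optional=*/false, "t2");
+//   // t1 starts at least 2 time units after the end of t2.
+//   solver.AddConstraint(solver.MakeIntervalVarRelationWithDelay(
+//       t1, Solver::STARTS_AFTER_END, t2, /*delay=*/2));
+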
+static const char*
+ __doc_operations_research_Solver_MakeInversePermutationConstraint =
+ R"doc(Creates a constraint that enforces that 'left' and 'right' both
+represent permutations of [0..left.size()-1], and that 'right' is the
+inverse permutation of 'left', i.e. for all i in [0..left.size()-1],
+right[left[i]] = i.)doc";
+
+static const char* __doc_operations_research_Solver_MakeIsBetweenCt =
+ R"doc(b == (l <= expr <= u))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsBetweenVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeIsDifferentCstCt =
+ R"doc(boolvar == (var != value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsDifferentCstVar =
+ R"doc(status var of (var != value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsDifferentCt =
+ R"doc(b == (v1 != v2))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsDifferentVar =
+ R"doc(status var of (v1 != v2))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsEqualCstCt =
+ R"doc(boolvar == (var == value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsEqualCstVar =
+ R"doc(status var of (var == value))doc";
+
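+// Illustrative sketch (assumes the standard operations_research::Solver C++
+// API): the "status var" factories return a 0/1 IntVar reifying a condition.
+//   operations_research::Solver solver("reif_demo");
+//   operations_research::IntVar* x = solver.MakeIntVar(0, 10, "x");
+//   operations_research::IntVar* b = solver.MakeIsEqualCstVar(x, 5);
+//   // b is bound to 1 iff x == 5, and to 0 otherwise.
+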
+static const char* __doc_operations_research_Solver_MakeIsEqualCt =
+ R"doc(b == (v1 == v2))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsEqualVar =
+ R"doc(status var of (v1 == v2))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterCstCt =
+ R"doc(b == (v > c))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterCstVar =
+ R"doc(status var of (var > value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterCt =
+ R"doc(b == (left > right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterOrEqualCstCt =
+ R"doc(boolvar == (var >= value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterOrEqualCstVar =
+ R"doc(status var of (var >= value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterOrEqualCt =
+ R"doc(b == (left >= right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterOrEqualVar =
+ R"doc(status var of (left >= right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsGreaterVar =
+ R"doc(status var of (left > right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessCstCt =
+ R"doc(b == (v < c))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessCstVar =
+ R"doc(status var of (var < value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessCt =
+ R"doc(b == (left < right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessOrEqualCstCt =
+ R"doc(boolvar == (var <= value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessOrEqualCstVar =
+ R"doc(status var of (var <= value))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessOrEqualCt =
+ R"doc(b == (left <= right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessOrEqualVar =
+ R"doc(status var of (left <= right))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsLessVar =
+ R"doc(status var of (left < right))doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeIsLexicalLessOrEqualWithOffsetsCt =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeIsMemberCt =
+ R"doc(boolvar == (expr in set))doc";
+
+static const char* __doc_operations_research_Solver_MakeIsMemberCt_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeIsMemberVar =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeIsMemberVar_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeLastSolutionCollector =
+ R"doc(Collect the last solution of the search.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLastSolutionCollector_2 =
+ R"doc(Collect the last solution of the search. The variables will need to be
+added later.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLess =
+ R"doc(left < right)doc";
+
+static const char* __doc_operations_research_Solver_MakeLess_2 =
+ R"doc(expr < value)doc";
+
+static const char* __doc_operations_research_Solver_MakeLess_3 =
+ R"doc(expr < value)doc";
+
+static const char* __doc_operations_research_Solver_MakeLessOrEqual =
+ R"doc(left <= right)doc";
+
+static const char* __doc_operations_research_Solver_MakeLessOrEqual_2 =
+ R"doc(expr <= value)doc";
+
+static const char* __doc_operations_research_Solver_MakeLessOrEqual_3 =
+ R"doc(expr <= value)doc";
+
+static const char* __doc_operations_research_Solver_MakeLexicalLess =
+ R"doc(Creates a constraint that enforces that left is lexicographically less
+than right.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLexicalLessOrEqual =
+ R"doc(Creates a constraint that enforces that left is lexicographically less
+than or equal to right.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLexicalLessOrEqualWithOffsets =
+ R"doc(Creates a constraint that enforces that left is lexicographically less
+than or equal to right with an offset. This means that for the first
+index i such that left[i] is not in [right[i] - (offset[i] - 1),
+right[i]], left[i] + offset[i] <= right[i]. Offset values must be > 0.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLexicographicOptimize =
+ R"doc(Creates a lexicographic objective, following the order of the
+variables given. Each variable has a corresponding optimization
+direction and step.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLexicographicSimulatedAnnealing =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLexicographicTabuSearch = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeLightElement =
+    R"doc(Returns a light one-dimensional function-based element constraint
+ensuring var == values(index). The constraint does not perform bound
+reduction of the resulting variable until the index variable is bound.
+If deep_serialize returns false, the model visitor will not extract
+all possible values from the values function.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLightElement_2 =
+    R"doc(Light two-dimensional function-based element constraint ensuring
+var == values(index1, index2). The constraint does not perform bound
+reduction of the resulting variable until the index variables are
+bound. If deep_serialize returns false, the model visitor will not
+extract all possible values from the values function.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLocalSearchPhase =
+    R"doc(Local Search decision builder factories. Local search is used to
+improve a given solution. This initial solution can be specified
+either by an Assignment or by a DecisionBuilder and the corresponding
+variables, the initial solution being the first solution found by the
+DecisionBuilder. The LocalSearchPhaseParameters parameter holds the
+actual definition of the local search phase:
+- a local search operator used to explore the neighborhood of the
+current solution,
+- a decision builder to instantiate unbound variables once a neighbor
+has been defined; in the case of LNS-based operators it instantiates
+fragment variables; search monitors can be added to this sub-search by
+wrapping the decision builder with MakeSolveOnce,
+- a search limit specifying how long local search looks for neighbors
+before accepting one; the last neighbor is always taken and, in the
+case of a greedy search, this corresponds to the best local neighbor;
+first-accept (which is the default behavior) can be modeled using a
+solution found limit of 1,
+- a vector of local search filters used to speed up the search by
+pruning infeasible neighbors.
+Metaheuristics can be added by defining specialized search monitors;
+currently down/up-hill climbing is available through OptimizeVar, as
+well as Guided Local Search, Tabu Search and Simulated Annealing.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLocalSearchPhase_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeLocalSearchPhase_3 =
+    R"doc(Variant with a sub_decision_builder specific to the first solution.)doc";
+
+static const char* __doc_operations_research_Solver_MakeLocalSearchPhase_4 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters =
+ R"doc(Local Search Phase Parameters)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters_3 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters_4 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters_5 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeLocalSearchPhaseParameters_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeLubyRestart =
+ R"doc(This search monitor will restart the search periodically. At the
+iteration n, it will restart after scale_factor * Luby(n) failures
+where Luby is the Luby Strategy (i.e. 1 1 2 1 1 2 4 1 1 2 1 1 2 4
+8...).)doc";
+
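+// Illustrative sketch (assumes the standard operations_research::Solver C++
+// API): a restart monitor is passed to Solve() alongside other monitors.
+//   operations_research::Solver solver("luby_demo");
+//   operations_research::SearchMonitor* restart = solver.MakeLubyRestart(10);
+//   // With scale_factor = 10 the search restarts after 10, 10, 20, 10, 10,
+//   // 20, 40, ... failures, following the Luby sequence.
+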
+static const char* __doc_operations_research_Solver_MakeMapDomain =
+ R"doc(This constraint maps the domain of 'var' onto the array of variables
+'actives'. That is for all i in [0 .. size - 1]: actives[i] == 1 <=>
+var->Contains(i);)doc";
+
+static const char* __doc_operations_research_Solver_MakeMax =
+ R"doc(std::max(vars))doc";
+
+static const char* __doc_operations_research_Solver_MakeMax_2 =
+ R"doc(std::max(left, right))doc";
+
+static const char* __doc_operations_research_Solver_MakeMax_3 =
+ R"doc(std::max(expr, value))doc";
+
+static const char* __doc_operations_research_Solver_MakeMax_4 =
+ R"doc(std::max(expr, value))doc";
+
+static const char* __doc_operations_research_Solver_MakeMaxEquality =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeMaximize =
+ R"doc(Creates a maximization objective.)doc";
+
+static const char* __doc_operations_research_Solver_MakeMemberCt =
+    R"doc(expr in set. Propagation is lazy, i.e. this constraint does not
+create holes in the domain of the variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeMemberCt_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeMin =
+ R"doc(std::min(vars))doc";
+
+static const char* __doc_operations_research_Solver_MakeMin_2 =
+    R"doc(std::min(left, right))doc";
+
+static const char* __doc_operations_research_Solver_MakeMin_3 =
+ R"doc(std::min(expr, value))doc";
+
+static const char* __doc_operations_research_Solver_MakeMin_4 =
+ R"doc(std::min(expr, value))doc";
+
+static const char* __doc_operations_research_Solver_MakeMinEquality =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeMinimize =
+ R"doc(Creates a minimization objective.)doc";
+
+static const char* __doc_operations_research_Solver_MakeMirrorInterval =
+ R"doc(Creates an interval var that is the mirror image of the given one,
+that is, the interval var obtained by reversing the axis.)doc";
+
+static const char* __doc_operations_research_Solver_MakeModulo =
+ R"doc(Modulo expression x % mod (with the python convention for modulo).)doc";
+
+static const char* __doc_operations_research_Solver_MakeModulo_2 =
+ R"doc(Modulo expression x % mod (with the python convention for modulo).)doc";
+
+static const char* __doc_operations_research_Solver_MakeMonotonicElement =
+    R"doc(Function based element. The constraint takes ownership of the
+callback. The callback must be monotonic. It must be able to cope with
+any possible value in the domain of 'index' (potentially negative ones
+too). Furthermore, monotonicity is not checked. Thus giving a non-
+monotonic function, or specifying an incorrect increasing parameter
+will result in undefined behavior.)doc";
+
+static const char* __doc_operations_research_Solver_MakeMoveTowardTargetOperator =
+ R"doc(Creates a local search operator that tries to move the assignment of
+some variables toward a target. The target is given as an Assignment.
+This operator generates neighbors in which the only difference
+compared to the current state is that one variable that belongs to the
+target assignment is set to its target value.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeMoveTowardTargetOperator_2 =
+ R"doc(Creates a local search operator that tries to move the assignment of
+some variables toward a target. The target is given either as two
+vectors: a vector of variables and a vector of associated target
+values. The two vectors should be of the same length. This operator
+generates neighbors in which the only difference compared to the
+current state is that one variable that belongs to the given vector is
+set to its target value.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNBestLexicographicValueSolutionCollector =
+ R"doc(Same as above but supporting lexicographic objectives; 'maximize'
+specifies the optimization direction for each objective.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNBestLexicographicValueSolutionCollector_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNBestValueSolutionCollector =
+ R"doc(Same as MakeBestValueSolutionCollector but collects the best
+solution_count solutions. Collected solutions are sorted in increasing
+optimality order (the best solution is the last one).)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNBestValueSolutionCollector_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNeighborhoodLimit =
+    R"doc(Creates a local search operator that wraps another local search
+operator and limits the number of neighbors explored (i.e., calls to
+MakeNextNeighbor from the current solution, between two calls to
+Start()). When this limit is reached, MakeNextNeighbor() returns
+false. The counter is cleared when Start() is called.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize =
+ R"doc(NestedOptimize will collapse a search tree described by a decision
+builder 'db' and a set of monitors and wrap it into a single point. If
+there are no solutions to this nested tree, then NestedOptimize will
+fail. If there are solutions, it will find the best as described by
+the mandatory objective in the solution as well as the optimization
+direction, instantiate all variables to this solution, and return
+nullptr.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNestedOptimize_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNoCycle =
+ R"doc(Prevent cycles. The "nexts" variables represent the next in the chain.
+"active" variables indicate if the corresponding next variable is
+active; this could be useful to model unperformed nodes in a routing
+problem. A callback can be added to specify sink values (by default
+sink values are values >= vars.size()). Ownership of the callback is
+passed to the constraint. If assume_paths is either not specified or
+true, the constraint assumes the "nexts" variables represent paths
+(and performs a faster propagation); otherwise the constraint assumes
+they represent a forest.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNoCycle_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNonEquality =
+ R"doc(left != right)doc";
+
+static const char* __doc_operations_research_Solver_MakeNonEquality_2 =
+ R"doc(expr != value)doc";
+
+static const char* __doc_operations_research_Solver_MakeNonEquality_3 =
+ R"doc(expr != value)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingBoxesConstraint =
+ R"doc(This constraint states that all the boxes must not overlap. The
+coordinates of box i are: (x_vars[i], y_vars[i]), (x_vars[i],
+y_vars[i] + y_size[i]), (x_vars[i] + x_size[i], y_vars[i]), (x_vars[i]
++ x_size[i], y_vars[i] + y_size[i]). The sizes must be non-negative.
+Boxes with a zero dimension can be pushed like any box.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingBoxesConstraint_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingBoxesConstraint_3 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingNonStrictBoxesConstraint =
+ R"doc(This constraint states that all the boxes must not overlap. The
+coordinates of box i are: (x_vars[i], y_vars[i]), (x_vars[i],
+y_vars[i] + y_size[i]), (x_vars[i] + x_size[i], y_vars[i]), (x_vars[i]
++ x_size[i], y_vars[i] + y_size[i]). The sizes must be positive. Boxes
+with a zero dimension can be placed anywhere.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingNonStrictBoxesConstraint_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeNonOverlappingNonStrictBoxesConstraint_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNotBetweenCt =
+ R"doc((expr < l || expr > u) This constraint is lazy as it will not make
+holes in the domain of variables. It will propagate only when
+expr->Min() >= l or expr->Max() <= u.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNotMemberCt =
+ R"doc(expr not in set.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNotMemberCt_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeNotMemberCt_3 =
+ R"doc(expr should not be in the list of forbidden intervals
+[start[i]..end[i]].)doc";
+
+static const char* __doc_operations_research_Solver_MakeNotMemberCt_4 =
+ R"doc(expr should not be in the list of forbidden intervals
+[start[i]..end[i]].)doc";
+
+static const char* __doc_operations_research_Solver_MakeNotMemberCt_5 =
+ R"doc(expr should not be in the list of forbidden intervals.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNullIntersect =
+ R"doc(Creates a constraint that states that all variables in the first
+vector are different from all variables in the second group. Thus the
+set of values in the first vector does not intersect with the set of
+values in the second vector.)doc";
+
+static const char* __doc_operations_research_Solver_MakeNullIntersectExcept =
+ R"doc(Creates a constraint that states that all variables in the first
+vector are different from all variables from the second group, unless
+they are assigned to the escape value. Thus the set of values in the
+first vector minus the escape value does not intersect with the set of
+values in the second vector.)doc";
+
+static const char* __doc_operations_research_Solver_MakeOperator =
+ R"doc(Local Search Operators.)doc";
+
+static const char* __doc_operations_research_Solver_MakeOperator_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeOperator_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeOperator_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeOpposite =
+ R"doc(-expr)doc";
+
+static const char* __doc_operations_research_Solver_MakeOptimize =
+    R"doc(Creates an objective with a given sense (true = maximization).)doc";
+
+static const char* __doc_operations_research_Solver_MakePack =
+ R"doc(This constraint packs all variables onto 'number_of_bins' variables.
+For any given variable, a value of 'number_of_bins' indicates that the
+variable is not assigned to any bin. Dimensions, i.e., cumulative
+constraints on this packing, can be added directly from the pack
+class.)doc";
+
+static const char* __doc_operations_research_Solver_MakePathConnected =
+ R"doc(Check whether more propagation is needed.)doc";
+
+static const char* __doc_operations_research_Solver_MakePathCumul =
+ R"doc(Creates a constraint which accumulates values along a path such that:
+cumuls[next[i]] = cumuls[i] + transits[i]. Active variables indicate
+if the corresponding next variable is active; this could be useful to
+model unperformed nodes in a routing problem.)doc";
+
+static const char* __doc_operations_research_Solver_MakePathCumul_2 =
+ R"doc(Creates a constraint which accumulates values along a path such that:
+cumuls[next[i]] = cumuls[i] + transit_evaluator(i, next[i]). Active
+variables indicate if the corresponding next variable is active; this
+could be useful to model unperformed nodes in a routing problem.
+Ownership of transit_evaluator is taken and it must be a repeatable
+callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakePathCumul_3 =
+ R"doc(Creates a constraint which accumulates values along a path such that:
+cumuls[next[i]] = cumuls[i] + transit_evaluator(i, next[i]) +
+slacks[i]. Active variables indicate if the corresponding next
+variable is active; this could be useful to model unperformed nodes in
+a routing problem. Ownership of transit_evaluator is taken and it must
+be a repeatable callback.)doc";
+
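+// Illustrative sketch (assumes the standard operations_research::Solver C++
+// API): the basic MakePathCumul overload links cumul variables along a path.
+//   // Assumes nexts, active, cumuls and transits are std::vector<IntVar*>
+//   // of matching sizes built on an operations_research::Solver `solver`.
+//   solver.AddConstraint(
+//       solver.MakePathCumul(nexts, active, cumuls, transits));
+//   // Enforces cumuls[nexts[i]] == cumuls[i] + transits[i] on active nodes.
+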
+static const char*
+ __doc_operations_research_Solver_MakePathEnergyCostConstraint = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakePathPrecedenceConstraint =
+ R"doc(the implementation can easily be modified to do that; evaluate the
+impact on models solved with local search.)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakePathPrecedenceConstraint_2 =
+ R"doc(Same as MakePathPrecedenceConstraint but ensures precedence pairs on
+some paths follow a LIFO or FIFO order. LIFO order: given 2 pairs
+(a,b) and (c,d), if a is before c on the path then d must be before b
+or b must be before c. FIFO order: given 2 pairs (a,b) and (c,d), if a
+is before c on the path then b must be before d. LIFO (resp. FIFO)
+orders are enforced only on paths starting by indices in
+lifo_path_starts (resp. fifo_path_start).)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakePathTransitPrecedenceConstraint =
+ R"doc(Same as MakePathPrecedenceConstraint but will force i to be before j
+if the sum of transits on the path from i to j is strictly positive.)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase =
+ R"doc(for all other functions that have several homonyms in this .h).)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_4 =
+ R"doc(var_val1_val2_comparator(var, val1, val2) is true iff assigning value
+"val1" to variable "var" is better than assigning value "val2".)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_5 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_6 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_7 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_8 =
+ R"doc(Shortcuts for small arrays.)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_9 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_10 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_11 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_12 =
+ R"doc(Returns a decision builder which assigns values to variables which
+minimize the values returned by the evaluator. The arguments passed to
+the evaluator callback are the indices of the variables in vars and
+the values of these variables. Ownership of the callback is passed to
+the decision builder.)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_13 =
+ R"doc(Returns a decision builder which assigns values to variables which
+minimize the values returned by the evaluator. In case of tie breaks,
+the second callback is used to choose the best index in the array of
+equivalent pairs with equivalent evaluations. The arguments passed to
+the evaluator callback are the indices of the variables in vars and
+the values of these variables. Ownership of the callback is passed to
+the decision builder.)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_14 =
+ R"doc(Scheduling phases.)doc";
+
+static const char* __doc_operations_research_Solver_MakePhase_15 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePiecewiseLinearExpr =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakePower =
+ R"doc(expr ^ n (n > 0))doc";
+
+static const char* __doc_operations_research_Solver_MakePrintModelVisitor =
+ R"doc(Prints the model.)doc";
+
+static const char* __doc_operations_research_Solver_MakeProd =
+ R"doc(left * right)doc";
+
+static const char* __doc_operations_research_Solver_MakeProd_2 =
+ R"doc(expr * value)doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeProfiledDecisionBuilderWrapper =
+ R"doc(Activates profiling on a decision builder.)doc";
+
+static const char* __doc_operations_research_Solver_MakeRandomLnsOperator =
+ R"doc(Creates a large neighborhood search operator which creates fragments
+(set of relaxed variables) with up to number_of_variables random
+variables (sampling with replacement is performed meaning that at most
+number_of_variables variables are selected). Warning: this operator
+will always return neighbors; using it without a search limit will
+result in a non-ending search. Optionally a random seed can be
+specified.)doc";
+
+static const char* __doc_operations_research_Solver_MakeRandomLnsOperator_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeRankFirstInterval =
+ R"doc(Returns a decision that tries to rank first the ith interval var in
+the sequence variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeRankLastInterval =
+ R"doc(Returns a decision that tries to rank last the ith interval var in the
+sequence variable.)doc";
+
+static const char* __doc_operations_research_Solver_MakeRejectFilter =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeRestoreAssignment =
+ R"doc(Returns a DecisionBuilder which restores an Assignment (calls void
+Assignment::Restore()))doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProd =
+ R"doc(scalar product)doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProd_2 =
+ R"doc(scalar product)doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdEquality =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdEquality_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdEquality_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdEquality_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdGreaterOrEqual =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeScalProdGreaterOrEqual_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdLessOrEqual =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScalProdLessOrEqual_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeScheduleOrExpedite =
+ R"doc(Returns a decision that tries to schedule a task at a given time. On
+the Apply branch, it will set that interval var as performed and set
+its end to 'est'. On the Refute branch, it will just update the
+'marker' to 'est' - 1. This decision is used in the
+INTERVAL_SET_TIMES_BACKWARD strategy.)doc";
+
+static const char* __doc_operations_research_Solver_MakeScheduleOrPostpone =
+ R"doc(Returns a decision that tries to schedule a task at a given time. On
+the Apply branch, it will set that interval var as performed and set
+its start to 'est'. On the Refute branch, it will just update the
+'marker' to 'est' + 1. This decision is used in the
+INTERVAL_SET_TIMES_FORWARD strategy.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog =
+ R"doc(The SearchMonitors below will display a periodic search log on
+LOG(INFO) every branch_period branches explored.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_2 =
+    R"doc(At each solution, this monitor also displays the var value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_3 =
+ R"doc(At each solution, this monitor will also display result of @p
+display_callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_4 =
+ R"doc(At each solution, this monitor will display the 'var' value and the
+result of @p display_callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_5 =
+ R"doc(At each solution, this monitor will display the 'vars' values and the
+result of @p display_callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_6 =
+    R"doc(OptimizeVar Search Logs. At each solution, this monitor will also
+display the 'opt_var' value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_7 =
+ R"doc(Creates a search monitor that will also print the result of the
+display callback.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchLog_8 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchProgressBar =
+ R"doc(Creates a search monitor tracking the progress of the search in a
+progress bar. If a search limit is specified in the search, the bar
+shows the progress percentage before reaching the limit. If no limit
+is specified, an activity bar is displayed.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSearchTrace =
+ R"doc(Creates a search monitor that will trace precisely the behavior of the
+search. Use this only for low level debugging.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSemiContinuousExpr =
+    R"doc(Semi-continuous expression (x <= 0 -> f(x) = 0; x > 0 -> f(x) = ax +
+b), with a >= 0 and b >= 0.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSimulatedAnnealing =
+ R"doc(Creates a Simulated Annealing monitor.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce =
+    R"doc(SolveOnce will collapse a search tree described by a decision builder
+'db' and a set of monitors and wrap it into a single point. If there
+are no solutions to this nested tree, then SolveOnce will fail. If
+there is a solution, it will find it and return nullptr.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSolveOnce_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSortingConstraint =
+    R"doc(Creates a constraint binding the arrays of variables "vars" and
+"sorted_vars": sorted_vars[0] must be equal to the minimum of all
+variables in vars, and so on: the value of sorted_vars[i] must be
+equal to the i-th smallest value of the variables in vars.
+
+This constraint propagates in both directions: from "vars" to
+"sorted_vars" and vice-versa.
+
+Behind the scenes, this constraint maintains that:
+- sorted_vars is always increasing;
+- whatever the values of vars, there exists a permutation that injects
+its values into the sorted variables.
+
+For more info, please have a look at:
+https://mpi-inf.mpg.de/~mehlhorn/ftp/Mehlhorn-Thiel.pdf)doc";
+
+static const char* __doc_operations_research_Solver_MakeSplitVariableDomain =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSquare =
+ R"doc(expr * expr)doc";
+
+static const char* __doc_operations_research_Solver_MakeStatisticsModelVisitor =
+ R"doc(Displays some nice statistics on the model.)doc";
+
+static const char* __doc_operations_research_Solver_MakeStoreAssignment =
+ R"doc(Returns a DecisionBuilder which stores an Assignment (calls void
+Assignment::Store()))doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeStrictDisjunctiveConstraint =
+ R"doc(This constraint forces all interval vars into an non-overlapping
+sequence. Intervals with zero durations cannot overlap with over
+intervals.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSubCircuit =
+ R"doc(Force the "nexts" variable to create a complete Hamiltonian path for
+those that do not loop upon themselves.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSum =
+ R"doc(left + right.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSum_2 =
+ R"doc(expr + value.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSum_3 =
+ R"doc(sum of all vars.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSumEquality =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSumEquality_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSumGreaterOrEqual =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSumLessOrEqual =
+ R"doc(Variation on arrays.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSumObjectiveFilter =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSumObjectiveFilter_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSymmetryManager =
+ R"doc(Symmetry Breaking.)doc";
+
+static const char* __doc_operations_research_Solver_MakeSymmetryManager_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSymmetryManager_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSymmetryManager_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeSymmetryManager_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeTabuSearch =
+    R"doc(Creates a Tabu Search monitor. In the context of local search the
+behavior is similar to MakeOptimize(), creating an objective in a
+given sense. The behavior differs once a local optimum is reached:
+thereafter solutions which degrade the value of the objective are
+allowed if they are not "tabu". A solution is "tabu" if it doesn't
+respect the following rules:
+- improving the best solution found so far;
+- variables in the "keep" list must keep their value, and variables in
+the "forbid" list must not take the value they have in the list.
+Variables with new values enter the tabu lists after each new solution
+found and leave the lists after a given number of iterations (called
+tenure). Only the variables passed to the method can enter the lists.
+The tabu criterion is softened by the tabu factor, which gives the
+number of "tabu" violations that is tolerated; a factor of 1 means no
+violations are allowed; a factor of 0 means all violations are allowed.)doc";
+
+static const char* __doc_operations_research_Solver_MakeTemporalDisjunction =
+ R"doc(This constraint implements a temporal disjunction between two interval
+vars t1 and t2. 'alt' indicates which alternative was chosen (alt == 0
+is equivalent to t1 before t2).)doc";
+
+static const char* __doc_operations_research_Solver_MakeTemporalDisjunction_2 =
+ R"doc(This constraint implements a temporal disjunction between two interval
+vars.)doc";
+
+static const char* __doc_operations_research_Solver_MakeTransitionConstraint =
+    R"doc(This constraint creates a finite automaton that will check the
+sequence of variables vars. It uses a transition table called
+'transition_table'. Each transition is a triple (current_state,
+variable_value, new_state). The initial state is given, and the set of
+accepted states is described by 'final_states'. These states are
+hidden inside the constraint. Only the transitions (i.e. the
+variables) are visible.)doc";
+
+static const char* __doc_operations_research_Solver_MakeTransitionConstraint_2 =
+    R"doc(This constraint creates a finite automaton that will check the
+sequence of variables vars. It uses a transition table called
+'transition_table'. Each transition is a triple (current_state,
+variable_value, new_state). The initial state is given, and the set of
+accepted states is described by 'final_states'. These states are
+hidden inside the constraint. Only the transitions (i.e. the
+variables) are visible.)doc";
+
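+// Illustrative sketch (assumes the standard operations_research::Solver C++
+// API and the IntTupleSet helper from ortools/util/tuple_set.h): a two-state
+// automaton forbidding two consecutive 1s over 0/1 variables `vars`.
+//   operations_research::IntTupleSet transitions(3);  // (state, value, state)
+//   transitions.Insert3(0, 0, 0);
+//   transitions.Insert3(0, 1, 1);
+//   transitions.Insert3(1, 0, 0);
+//   solver.AddConstraint(solver.MakeTransitionConstraint(
+//       vars, transitions, /*initial_state=*/0, /*final_states=*/{0, 1}));
+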
+static const char* __doc_operations_research_Solver_MakeTrueConstraint =
+ R"doc(This constraint always succeeds.)doc";
+
+static const char* __doc_operations_research_Solver_MakeVariableDegreeVisitor =
+ R"doc(Compute the number of constraints a variable is attached to.)doc";
+
+static const char* __doc_operations_research_Solver_MakeVariableDomainFilter =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeVariableGreaterOrEqualValue =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MakeVariableLessOrEqualValue = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedMaximize =
+    R"doc(Creates a maximization weighted objective.)doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedMaximize_2 =
+    R"doc(Creates a maximization weighted objective.)doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedMinimize =
+ R"doc(Creates a minimization weighted objective. The actual objective is
+scalar_prod(sub_objectives, weights).)doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedMinimize_2 =
+ R"doc(Creates a minimization weighted objective. The actual objective is
+scalar_prod(sub_objectives, weights).)doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedOptimize =
+ R"doc(Creates a weighted objective with a given sense (true = maximization).)doc";
+
+static const char* __doc_operations_research_Solver_MakeWeightedOptimize_2 =
+ R"doc(Creates a weighted objective with a given sense (true = maximization).)doc";
+
+static const char* __doc_operations_research_Solver_MarkerType =
+ R"doc(This enum is used internally in private methods Solver::PushState and
+Solver::PopState to tag states in the search tree.)doc";
+
+static const char* __doc_operations_research_Solver_MarkerType_CHOICE_POINT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MarkerType_REVERSIBLE_ACTION = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MarkerType_SENTINEL =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MarkerType_SIMPLE_MARKER =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MemoryUsage =
+ R"doc(Current memory usage in bytes)doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent =
+ R"doc(Search monitor events.)doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kAccept =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kAcceptDelta =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kAcceptNeighbor = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kAcceptSolution = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kAcceptUncheckedNeighbor =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kAfterDecision = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kApplyDecision = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kAtSolution =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kBeginFail =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kBeginInitialPropagation =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kBeginNextDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kEndFail =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kEndInitialPropagation =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kEndNextDecision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kEnterSearch =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kExitSearch =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kIsUncheckedSolutionLimitReached =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kLast =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_MonitorEvent_kLocalOptimum =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kNoMoreSolutions =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kPeriodicCheck = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kProgressPercent =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kRefuteDecision = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MonitorEvent_kRestartSearch = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_MultiArmedBanditConcatenateOperators =
+ R"doc(Creates a local search operator which concatenates a vector of
+operators. Uses Multi-Armed Bandit approach for choosing the next
+operator to use. Sorts operators based on Upper Confidence Bound
+Algorithm which evaluates each operator as sum of average improvement
+and exploration function.
+
+Updates the order of operators when accepts a neighbor with objective
+improvement.)doc";
+
+static const char* __doc_operations_research_Solver_NameAllVariables =
+ R"doc(Returns whether all variables should be named.)doc";
+
+static const char* __doc_operations_research_Solver_NewSearch =
+    R"doc(@{ Decomposed search. The code for a top level search should look like
+  solver->NewSearch(db);
+  while (solver->NextSolution()) {
+    // .. use the current solution
+  }
+  solver->EndSearch();)doc";
+
+static const char* __doc_operations_research_Solver_NewSearch_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_NewSearch_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_NewSearch_4 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_NewSearch_5 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_NewSearch_6 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_NextSolution = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Now =
+    R"doc(The 'absolute time' as seen by the solver. Unless a user-provided
+clock was injected via SetClock() (e.g. for unit tests), this is a real
+walltime, shifted so that it was 0 at construction. All so-called
+"walltime" limits are relative to this time.)doc";
+
+static const char* __doc_operations_research_Solver_OptimizationDirection =
+ R"doc(Optimization directions.)doc";
+
+static const char*
+ __doc_operations_research_Solver_OptimizationDirection_MAXIMIZATION =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_OptimizationDirection_MINIMIZATION =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_OptimizationDirection_NOT_SET =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ParentSearch =
+ R"doc(Returns the Search object which is the parent of the active search,
+i.e., the search below the top of the stack. If the active search is
+at the bottom of the stack, returns the active search.)doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification =
+ R"doc(A constraint that maintains the energy cost of paths. Energy is the
+integral of force applied over distance. More formally, the energy
+used on a path is: energy[path] = sum(node | paths[node] == path /\
+node not end) forces[next[node]] * distances[node] where forces[n] is
+the force needed to move loads accumulated until, but excluding weight
+and distances[n] is the distance from n to its successor. For
+instance, if a path has a route with two pickup/delivery pairs where
+the first shipment weighs 1 unit, the second weighs 2 units, and the
+distance between nodes is one, the {force/distance} of nodes would be:
+start{0/1} P1{0/1} P2{1/1} D1{3/1} D2{2/1} end{0/0}. The energy would
+be 0*1 + 1*1 + 3*1 + 2*1 + 0*1. The cost per unit of energy is
+cost_per_unit_below_threshold until the force reaches the threshold,
+then it is cost_per_unit_above_threshold: min(threshold,
+force.CumulVar(Next(node))) * distance.TransitVar(node) *
+cost_per_unit_below_threshold + max(0, force.CumulVar(Next(node)) -
+threshold) * distance.TransitVar(node) *
+cost_per_unit_above_threshold.)doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_EnergyCost =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_EnergyCost_IsNull =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_EnergyCost_cost_per_unit_above_threshold =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_EnergyCost_cost_per_unit_below_threshold =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_EnergyCost_threshold =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_costs =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_distances =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_forces =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_nexts =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_path_ends =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_path_energy_costs =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_path_starts =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_path_used_when_empty =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_PathEnergyCostConstraintSpecification_paths =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_PopState = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_PopState_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_ProcessConstraints =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_PushSentinel = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_PushState =
+    R"doc(The PushState and PopState methods manipulate the states of the
+reversible objects. They are visible only because they are useful for
+writing unit tests.)doc";
+
+static const char* __doc_operations_research_Solver_PushState_2 =
+ R"doc(Initialization. To be called by the constructors only.)doc";
+
+static const char* __doc_operations_research_Solver_Rand32 =
+    R"doc(Returns a random value between 0 and 'size' - 1.)doc";
+
+static const char* __doc_operations_research_Solver_Rand64 =
+    R"doc(Returns a random value between 0 and 'size' - 1.)doc";
+
+static const char* __doc_operations_research_Solver_RandomConcatenateOperators =
+ R"doc(Randomized version of local search concatenator; calls a random
+operator at each call to MakeNextNeighbor().)doc";
+
+static const char*
+ __doc_operations_research_Solver_RandomConcatenateOperators_2 =
+ R"doc(Randomized version of local search concatenator; calls a random
+operator at each call to MakeNextNeighbor(). The provided seed is used
+to initialize the random number generator.)doc";
+
+static const char* __doc_operations_research_Solver_ReSeed =
+ R"doc(Reseed the solver random generator.)doc";
+
+static const char* __doc_operations_research_Solver_RegisterDemon =
+ R"doc(Adds a new demon and wraps it inside a DemonProfiler if necessary.)doc";
+
+static const char* __doc_operations_research_Solver_RegisterIntExpr =
+ R"doc(Registers a new IntExpr and wraps it inside a TraceIntExpr if
+necessary.)doc";
+
+static const char* __doc_operations_research_Solver_RegisterIntVar =
+ R"doc(Registers a new IntVar and wraps it inside a TraceIntVar if necessary.)doc";
+
+static const char* __doc_operations_research_Solver_RegisterIntervalVar =
+ R"doc(Registers a new IntervalVar and wraps it inside a TraceIntervalVar if
+necessary.)doc";
+
+static const char* __doc_operations_research_Solver_RegularLimit =
+ R"doc(Creates a search limit that constrains the running time.)doc";
+
+static const char* __doc_operations_research_Solver_RestartCurrentSearch =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_RestartSearch = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_RevAlloc =
+ R"doc(Registers the given object as being reversible. By calling this
+method, the caller gives ownership of the object to the solver, which
+will delete it when there is a backtrack out of the current state.
+
+Returns the argument for convenience: this way, the caller may
+directly invoke a constructor in the argument, without having to store
+the pointer first.
+
+This function is only for users that define their own subclasses of
+BaseObject: for all subclasses predefined in the library, the
+corresponding factory methods (e.g., MakeIntVar(...),
+MakeAllDifferent(...)) already take care of the registration.)doc";
+
+static const char* __doc_operations_research_Solver_RevAllocArray =
+ R"doc(Like RevAlloc() above, but for an array of objects: the array must
+have been allocated with the new[] operator. The entire array will be
+deleted when backtracking out of the current state.
+
+This method is valid for arrays of int, int64_t, uint64_t, bool,
+BaseObject*, IntVar*, IntExpr*, and Constraint*.)doc";
+
+static const char* __doc_operations_research_Solver_RunUncheckedLocalSearch =
+ R"doc(Experimental: runs a local search on the given initial solution,
+checking the feasibility and the objective value of solutions using
+the filter manager only (solutions are never restored in the CP
+world). Only greedy descent is supported.)doc";
+
+static const char*
+ __doc_operations_research_Solver_RunUncheckedLocalSearchInternal =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAlloc = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_6 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_7 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SafeRevAllocArray_8 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SaveAndAdd =
+ R"doc(All-in-one SaveAndAdd_value.)doc";
+
+static const char* __doc_operations_research_Solver_SaveAndSetValue =
+ R"doc(All-in-one SaveAndSetValue.)doc";
+
+static const char* __doc_operations_research_Solver_SaveValue =
+ R"doc(SaveValue() saves the value of the corresponding object. It must be
+called before modifying the object. The value will be restored upon
+backtrack.)doc";
+
+static const char* __doc_operations_research_Solver_SearchContext = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SearchContext_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SearchDepth =
+ R"doc(Gets the search depth of the current active search. Returns -1 if
+there is no active search opened.)doc";
+
+static const char* __doc_operations_research_Solver_SearchLeftDepth =
+ R"doc(Gets the search left depth of the current active search. Returns -1 if
+there is no active search opened.)doc";
+
+static const char* __doc_operations_research_Solver_SearchLimit =
+    R"doc(Creates a search limit that is reached when either of the underlying
+limits is reached. That is, the returned limit is more stringent than
+both argument limits.)doc";
+
+static const char* __doc_operations_research_Solver_SearchLogParameters =
+ R"doc(Creates a search monitor from logging parameters.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_branch_period =
+ R"doc(SearchMonitors will display a periodic search log every branch_period
+branches explored.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_display_callback =
+ R"doc(SearchMonitors will display the result of display_callback at each new
+solution found and when the search finishes if
+display_on_new_solutions_only is false.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_display_on_new_solutions_only =
+ R"doc(To be used to protect from cases where display_callback assumes
+variables are instantiated, which only happens in AtSolution().)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_objective =
+ R"doc(SearchMonitors will display values of objective or variables (both
+cannot be used together).)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_offsets = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_scaling_factors =
+ R"doc(When displayed, objective or var values will be scaled and offset by
+the given values in the following way: scaling_factor * (value +
+offset).)doc";
+
+static const char*
+ __doc_operations_research_Solver_SearchLogParameters_variables =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SequenceStrategy =
+ R"doc(Used for scheduling. Not yet implemented.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SequenceStrategy_CHOOSE_MIN_SLACK_RANK_FORWARD =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_SequenceStrategy_CHOOSE_RANDOM_RANK_FORWARD =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_SequenceStrategy_SEQUENCE_DEFAULT =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_SequenceStrategy_SEQUENCE_SIMPLE =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SetBranchSelector =
+ R"doc(Sets the given branch selector on the current active search.)doc";
+
+static const char* __doc_operations_research_Solver_SetClock =
+ R"doc(Set the clock in the timer. Does not take ownership. For dependency
+injection.)doc";
+
+static const char* __doc_operations_research_Solver_SetName = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SetSearchContext =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SetUseFastLocalSearch =
+    R"doc(Disables/enables fast local search.)doc";
+
+static const char* __doc_operations_research_Solver_ShouldFail =
+ R"doc(See http://cs/file:constraint_solver.i%20ShouldFail.)doc";
+
+static const char* __doc_operations_research_Solver_Solve =
+ R"doc(@{ Solves the problem using the given DecisionBuilder and returns true
+if a solution was found and accepted.
+
+These methods are the ones most users should use to search for a
+solution. Note that the definition of 'solution' is subtle. A solution
+here is defined as a leaf of the search tree with respect to the given
+decision builder for which there is no failure. What this means is
+that, contrary to intuition, a solution may not have all variables of
+the model bound. It is the responsibility of the decision builder to
+keep returning decisions until all variables are indeed bound. The
+most extreme counterexample is calling Solve with a trivial decision
+builder whose Next() method always returns nullptr. In this case,
+Solve immediately returns 'true', since not assigning any variable to
+any value is a solution, unless the root node propagation discovers
+that the model is infeasible.
+
+This function must be called either from outside of search, or from
+within the Next() method of a decision builder.
+
+Solve will terminate whenever any of the following events arises:
+
+* A search monitor asks the solver to terminate the search by calling
+solver()->FinishCurrentSearch().
+* A solution is found that is accepted by all search monitors, and
+none of the search monitors decides to search for another one.
+
+Upon search termination, there will be a series of backtracks all the
+way to the top level. This means that a user cannot expect to inspect
+the solution by querying variables after a call to Solve(): all the
+information will be lost. In order to do something with the solution,
+the user must either:
+
+* Use a search monitor that can process such a leaf. See, in
+particular, the SolutionCollector class.
+* Do not use Solve. Instead, use the more fine-grained approach using
+methods NewSearch(...), NextSolution(), and EndSearch().
+
+Parameter ``db``:
+ The decision builder that will generate the search tree.
+
+Parameter ``monitors``:
+ A vector of search monitors that will be notified of various
+ events during the search. In their reaction to these events, such
+ monitors may influence the search.)doc";
+
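+// Illustrative sketch, assuming the C++ Solver API declared in
+// constraint_solver.h: the fine-grained NewSearch()/NextSolution()/EndSearch()
+// loop recommended above, which lets the solution be inspected before the
+// final backtrack.
+//
+//   Solver solver("solve_example");
+//   IntVar* const x = solver.MakeIntVar(0, 3, "x");
+//   DecisionBuilder* const db = solver.MakePhase(
+//       x, Solver::CHOOSE_FIRST_UNBOUND, Solver::ASSIGN_MIN_VALUE);
+//   solver.NewSearch(db);
+//   while (solver.NextSolution()) {
+//     // x->Value() can be read here, before the search backtracks.
+//   }
+//   solver.EndSearch();
+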
+static const char* __doc_operations_research_Solver_Solve_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Solve_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Solve_4 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Solve_5 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Solve_6 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolveAndCommit =
+ R"doc(SolveAndCommit using a decision builder and up to three search
+monitors, usually one for the objective, one for the limits and one to
+collect solutions.
+
+The difference between a SolveAndCommit() and a Solve() method call is
+the fact that SolveAndCommit will not backtrack all modifications at
+the end of the search. This method is only usable during the Next()
+method of a decision builder.)doc";
+
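+// Sketch, assuming Solver::SolveAndCommit(DecisionBuilder*) as declared in
+// constraint_solver.h; MyBuilder and inner_db_ are placeholder names. It is
+// only legal to call this from inside DecisionBuilder::Next():
+//
+//   Decision* MyBuilder::Next(Solver* const solver) {
+//     if (solver->SolveAndCommit(inner_db_)) {
+//       // Modifications made by the inner search are kept, not backtracked.
+//     }
+//     return nullptr;
+//   }
+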
+static const char* __doc_operations_research_Solver_SolveAndCommit_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolveAndCommit_3 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolveAndCommit_4 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolveAndCommit_5 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolveDepth =
+ R"doc(Gets the number of nested searches. It returns 0 outside search, 1
+during the top level search, 2 or more in case of nested searches.)doc";
+
+static const char* __doc_operations_research_Solver_Solver =
+ R"doc(Solver API)doc";
+
+static const char* __doc_operations_research_Solver_Solver_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Solver_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_SolverState =
+ R"doc(This enum represents the state of the solver w.r.t. the search.)doc";
+
+static const char* __doc_operations_research_Solver_SolverState_AT_SOLUTION =
+ R"doc(After successful NextSolution and before EndSearch.)doc";
+
+static const char* __doc_operations_research_Solver_SolverState_IN_ROOT_NODE =
+ R"doc(Executing the root node.)doc";
+
+static const char* __doc_operations_research_Solver_SolverState_IN_SEARCH =
+ R"doc(Executing the search code.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SolverState_NO_MORE_SOLUTIONS =
+ R"doc(After failed NextSolution and before EndSearch.)doc";
+
+static const char* __doc_operations_research_Solver_SolverState_OUTSIDE_SEARCH =
+ R"doc(Before search, after search.)doc";
+
+static const char*
+ __doc_operations_research_Solver_SolverState_PROBLEM_INFEASIBLE =
+ R"doc(After search, the model is infeasible.)doc";
+
+static const char* __doc_operations_research_Solver_TopLevelSearch =
+ R"doc(Returns the Search object that is at the bottom of the search stack.
+Contrast with ActiveSearch(), which returns the search at the top of
+the stack.)doc";
+
+static const char* __doc_operations_research_Solver_TopPeriodicCheck =
+ R"doc(Performs PeriodicCheck on the top-level search; for instance, can be
+called from a nested solve to check top-level limits.)doc";
+
+static const char* __doc_operations_research_Solver_TopProgressPercent =
+    R"doc(Returns a percentage representing the progress of the search before
+reaching the limits of the top-level search (can be called from a
+nested solve).)doc";
+
+static const char* __doc_operations_research_Solver_Try =
+ R"doc("Try"-builders "recursively". For instance, Try(a,b,c,d) will give a
+tree unbalanced to the right, whereas Try(Try(a,b), Try(c,d)) will
+give a balanced tree. Investigate if we should only provide the binary
+version and/or if we should balance automatically.)doc";
+
+static const char* __doc_operations_research_Solver_Try_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Try_3 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_Try_4 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_UnaryIntervalRelation =
+ R"doc(This enum is used in Solver::MakeIntervalVarRelation to specify the
+temporal relation between an interval t and an integer d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_AVOID_DATE =
+ R"doc(STARTS_AFTER or ENDS_BEFORE, i.e. d is not in t. t starts after d,
+i.e. Start(t) >= d. t ends before d, i.e. End(t) <= d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_CROSS_DATE =
+ R"doc(STARTS_BEFORE and ENDS_AFTER at the same time, i.e. d is in t. t
+starts before d, i.e. Start(t) <= d. t ends after d, i.e. End(t) >= d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_ENDS_AFTER =
+ R"doc(t ends after d, i.e. End(t) >= d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_ENDS_AT =
+ R"doc(t ends at d, i.e. End(t) == d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_ENDS_BEFORE =
+ R"doc(t ends before d, i.e. End(t) <= d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_STARTS_AFTER =
+ R"doc(t starts after d, i.e. Start(t) >= d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_STARTS_AT =
+ R"doc(t starts at d, i.e. Start(t) == d.)doc";
+
+static const char*
+ __doc_operations_research_Solver_UnaryIntervalRelation_STARTS_BEFORE =
+ R"doc(t starts before d, i.e. Start(t) <= d.)doc";
+
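+// Sketch of MakeIntervalVarRelation() with one of the relations above,
+// assuming the C++ factory methods declared in constraint_solver.h:
+//
+//   IntervalVar* const t = solver.MakeFixedDurationIntervalVar(
+//       0, 10, 5, /*optional=*/false, "t");
+//   // Constrain t to end before date 8, i.e. End(t) <= 8.
+//   solver.AddConstraint(
+//       solver.MakeIntervalVarRelation(t, Solver::ENDS_BEFORE, 8));
+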
+static const char* __doc_operations_research_Solver_UnfreezeQueue = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_UnsafeRevAlloc =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_UnsafeRevAllocArray =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_UnsafeRevAllocArrayAux =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_UnsafeRevAllocAux =
+ R"doc(UnsafeRevAlloc is used internally for cells in SimpleRevFIFO and other
+structures like this.)doc";
+
+static const char* __doc_operations_research_Solver_UseFastLocalSearch =
+ R"doc(Returns true if fast local search is enabled.)doc";
+
+static const char* __doc_operations_research_Solver_VirtualMemorySize =
+ R"doc(Current virtual memory size in bytes)doc";
+
+static const char* __doc_operations_research_Solver_accepted_neighbors =
+ R"doc(The number of accepted neighbors.)doc";
+
+static const char* __doc_operations_research_Solver_accepted_neighbors_2 =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_additional_constraint_index = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_additional_constraints_list = R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_additional_constraints_parent_list =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_anonymous_variable_index =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_balancing_decision =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_balancing_decision_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_branches =
+ R"doc(The number of branches explored since the creation of the solver.)doc";
+
+static const char* __doc_operations_research_Solver_branches_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_cached_constants =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_cast_constraints =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_cast_information =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_check_alloc_state =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_clear_fail_intercept =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_const_parameters =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_constraint_index =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_constraints =
+ R"doc(Counts the number of constraints that have been added to the solver
+before the search.)doc";
+
+static const char* __doc_operations_research_Solver_constraints_list =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_context =
+ R"doc(Gets the current context of the search.)doc";
+
+static const char* __doc_operations_research_Solver_context_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_decisions = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_demon_profiler =
+ R"doc(Access to demon profiler.)doc";
+
+static const char* __doc_operations_research_Solver_demon_profiler_2 =
+ R"doc(Demon monitor)doc";
+
+static const char* __doc_operations_research_Solver_demon_runs =
+ R"doc(The number of demons executed during search for a given priority.)doc";
+
+static const char* __doc_operations_research_Solver_demon_runs_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_empty_name = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_fail_decision = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_fail_intercept =
+ R"doc(intercept failures)doc";
+
+static const char* __doc_operations_research_Solver_fail_stamp =
+ R"doc(The fail_stamp() is incremented after each backtrack.)doc";
+
+static const char* __doc_operations_research_Solver_fail_stamp_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_fails = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_failures =
+ R"doc(The number of failures encountered since the creation of the solver.)doc";
+
+static const char* __doc_operations_research_Solver_false_constraint =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_filtered_neighbors =
+ R"doc(The number of filtered neighbors (neighbors accepted by filters).)doc";
+
+static const char* __doc_operations_research_Solver_filtered_neighbors_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_local_search_monitor =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_local_search_profiler =
+ R"doc(Local search profiler monitor)doc";
+
+static const char* __doc_operations_research_Solver_local_search_state =
+ R"doc(Local search state.)doc";
+
+static const char* __doc_operations_research_Solver_model_cache = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_model_name =
+ R"doc(Returns the name of the model.)doc";
+
+static const char* __doc_operations_research_Solver_name = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_neighbors =
+ R"doc(The number of neighbors created.)doc";
+
+static const char* __doc_operations_research_Solver_neighbors_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_num_int_vars = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_operator_assign =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_optimization_direction =
+ R"doc(The direction of optimization, getter and setter.)doc";
+
+static const char* __doc_operations_research_Solver_optimization_direction_2 =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_parameters =
+ R"doc(Stored Parameters.)doc";
+
+static const char* __doc_operations_research_Solver_parameters_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_print_trace = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_propagation_monitor =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_propagation_object_names =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_queue = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_random = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_reset_action_on_fail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_searches = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_set_action_on_fail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_set_context =
+ R"doc(Sets the current context of the search.)doc";
+
+static const char* __doc_operations_research_Solver_set_fail_intercept =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_set_optimization_direction =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_set_variable_to_clean_on_fail =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_should_fail = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_solutions =
+ R"doc(The number of solutions found since the start of the search.)doc";
+
+static const char* __doc_operations_research_Solver_stamp =
+ R"doc(The stamp indicates how many moves in the search tree we have
+performed. It is useful to detect if we need to update some lazy
+structures.)doc";
+
+static const char* __doc_operations_research_Solver_state =
+ R"doc(State of the solver.)doc";
+
+static const char* __doc_operations_research_Solver_state_2 = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_timer = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_tmp_vector =
+ R"doc(Unsafe temporary vector. It is used to avoid leaks in operations that
+need storage and that may fail. See IntVar::SetValues() for instance.
+It is not locked; do not use in a multi-threaded or reentrant setup.)doc";
+
+static const char* __doc_operations_research_Solver_trail = R"doc()doc";
+
+static const char* __doc_operations_research_Solver_true_constraint =
+ R"doc(Cached constraints.)doc";
+
+static const char* __doc_operations_research_Solver_unchecked_solutions =
+ R"doc(The number of unchecked solutions found by local search.)doc";
+
+static const char*
+ __doc_operations_research_Solver_unnamed_enum_at_util_operations_research_constraint_solver_constraint_solver_h_3315_3 =
+ R"doc(interval of constants cached, inclusive:)doc";
+
+static const char*
+ __doc_operations_research_Solver_unnamed_enum_at_util_operations_research_constraint_solver_constraint_solver_h_3315_3_MAX_CACHED_INT_CONST =
+ R"doc()doc";
+
+static const char*
+ __doc_operations_research_Solver_unnamed_enum_at_util_operations_research_constraint_solver_constraint_solver_h_3315_3_MIN_CACHED_INT_CONST =
+ R"doc()doc";
+
+static const char* __doc_operations_research_Solver_use_fast_local_search =
+ R"doc(Local search mode)doc";
+
+static const char* __doc_operations_research_Solver_wall_time =
+ R"doc(DEPRECATED: Use Now() instead. Time elapsed, in ms since the creation
+of the solver.)doc";
+
+static const char* __doc_operations_research_StateInfo = R"doc()doc";
+
+static const char* __doc_operations_research_SymmetryBreaker = R"doc()doc";
+
+static const char* __doc_operations_research_Trail = R"doc()doc";
+
+static const char* __doc_operations_research_Zero =
+ R"doc(This method returns 0. It is useful when 0 can be cast either as a
+pointer or as an integer value and thus lead to an ambiguous function
+call.)doc";
+
+static const char* __doc_operations_research_operator_lshift = R"doc()doc";
+
+static const char* __doc_operations_research_operator_lshift_2 = R"doc()doc";
+
+static const char* __doc_operations_research_operator_lshift_3 = R"doc()doc";
+
+static const char* __doc_util_Clock = R"doc()doc";
+
+#if defined(__GNUG__)
+#pragma GCC diagnostic pop
+#endif
diff --git a/ortools/constraint_solver/python/constraint_solver_test.py b/ortools/constraint_solver/python/constraint_solver_test.py
new file mode 100644
index 00000000000..7806bf06753
--- /dev/null
+++ b/ortools/constraint_solver/python/constraint_solver_test.py
@@ -0,0 +1,83 @@
+#!/usr/bin/env python3
+# Copyright 2010-2025 Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Test for constraint_solver pybind11 layer."""
+
+from absl.testing import absltest
+from ortools.constraint_solver.python import constraint_solver
+
+
+class ConstraintSolverTest(absltest.TestCase):
+
+ def test_create_solver(self):
+ print("test_create_solver")
+ solver = constraint_solver.Solver("test_create_solver")
+ print(solver)
+
+ def test_create_int_var(self):
+ print("test_create_int_var")
+ solver = constraint_solver.Solver("test_create_int_var")
+ x = solver.new_int_var(0, 10, "x")
+ self.assertEqual(str(x), "x(0..10)")
+ self.assertEqual(x.min, 0)
+ self.assertEqual(x.max, 10)
+ self.assertEqual(x.name, "x")
+
+ y = solver.new_int_var([0, 2, 4])
+ self.assertEqual(y.min, 0)
+ self.assertEqual(y.max, 4)
+ self.assertEmpty(y.name)
+ y.name = "y"
+ self.assertEqual(y.name, "y")
+
+ def test_create_int_expr(self):
+ print("test_create_int_expr")
+ solver = constraint_solver.Solver("test_create_int_expr")
+ x = solver.new_int_var(0, 10, "x")
+ y = solver.new_int_var(0, 10, "y")
+
+ x_plus_3 = x + 3
+ self.assertEqual(str(x_plus_3), "(x(0..10) + 3)")
+ print(x_plus_3)
+ self.assertEqual(x_plus_3.min, 3)
+ self.assertEqual(x_plus_3.max, 13)
+
+ self.assertEqual(str(x * 5), "(x(0..10) * 5)")
+ self.assertEqual(str(x + y), "(x(0..10) + y(0..10))")
+ self.assertEqual(str(2 + x), "(x(0..10) + 2)")
+ self.assertEqual(str(7 * x), "(x(0..10) * 7)")
+ self.assertEqual(str(x * y), "(x(0..10) * y(0..10))")
+ self.assertEqual(str(x + 2 * y + 5), "((x(0..10) + (y(0..10) * 2)) + 5)")
+
+ def test_fail_outside_solve(self):
+ print("test_fail_outside_solve")
+ solver = constraint_solver.Solver("test_fail_outside_solve")
+ x = solver.new_int_var(0, 10, "x")
+ try:
+ x.set_min(20)
+ except ValueError:
+ print(" fail caught")
+
+ def test_rabbits_pheasants(self):
+ print("test_rabbits_pheasants")
+ solver = constraint_solver.Solver("test_rabbits_pheasants")
+ rabbits = solver.new_int_var(0, 20, "rabbits")
+ pheasants = solver.new_int_var(0, 20, "pheasants")
+ solver.add(rabbits + pheasants == 20)
+ solver.add(4 * rabbits + 2 * pheasants == 56)
+ solver.accept(solver.print_model_visitor())
+
+
+if __name__ == "__main__":
+ absltest.main()
diff --git a/ortools/constraint_solver/python/pywrapcp_test.py b/ortools/constraint_solver/python/pywrapcp_test.py
index b0be201a264..44eb4c5f85c 100755
--- a/ortools/constraint_solver/python/pywrapcp_test.py
+++ b/ortools/constraint_solver/python/pywrapcp_test.py
@@ -23,1370 +23,1403 @@
def inc_callback(i):
- return i + 1
+ return i + 1
class ClassIncCallback:
- def __init__(self, increment):
- self.__increment = increment
+ def __init__(self, increment):
+ self.__increment = increment
- def inc_method(self, i):
- return i + self.__increment
+ def inc_method(self, i):
+ return i + self.__increment
class TestIntVarContainerAPI(absltest.TestCase):
- def test_contains(self):
- self.assertTrue(
- hasattr(pywrapcp.IntVarContainer, "Contains"),
- dir(pywrapcp.IntVarContainer),
- )
+ def test_contains(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarContainer, "Contains"),
+ dir(pywrapcp.IntVarContainer),
+ )
- def test_element(self):
- self.assertTrue(
- hasattr(pywrapcp.IntVarContainer, "Element"),
- dir(pywrapcp.IntVarContainer),
- )
+ def test_element(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarContainer, "Element"),
+ dir(pywrapcp.IntVarContainer),
+ )
- def test_size(self):
- self.assertTrue(
- hasattr(pywrapcp.IntVarContainer, "Size"), dir(pywrapcp.IntVarContainer)
- )
+ def test_size(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarContainer, "Size"), dir(pywrapcp.IntVarContainer)
+ )
- def test_store(self):
- self.assertTrue(
- hasattr(pywrapcp.IntVarContainer, "Store"),
- dir(pywrapcp.IntVarContainer),
- )
+ def test_store(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarContainer, "Store"),
+ dir(pywrapcp.IntVarContainer),
+ )
- def test_restore(self):
- self.assertTrue(
- hasattr(pywrapcp.IntVarContainer, "Restore"),
- dir(pywrapcp.IntVarContainer),
- )
+ def test_restore(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarContainer, "Restore"),
+ dir(pywrapcp.IntVarContainer),
+ )
class TestIntervalVarContainerAPI(absltest.TestCase):
- def test_contains(self):
- self.assertTrue(
- hasattr(pywrapcp.IntervalVarContainer, "Contains"),
- dir(pywrapcp.IntervalVarContainer),
- )
-
- def test_element(self):
- self.assertTrue(
- hasattr(pywrapcp.IntervalVarContainer, "Element"),
- dir(pywrapcp.IntervalVarContainer),
- )
-
- def test_size(self):
- self.assertTrue(
- hasattr(pywrapcp.IntervalVarContainer, "Size"),
- dir(pywrapcp.IntervalVarContainer),
- )
-
- def test_store(self):
- self.assertTrue(
- hasattr(pywrapcp.IntervalVarContainer, "Store"),
- dir(pywrapcp.IntervalVarContainer),
- )
-
- def test_restore(self):
- self.assertTrue(
- hasattr(pywrapcp.IntervalVarContainer, "Restore"),
- dir(pywrapcp.IntervalVarContainer),
- )
+ def test_contains(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntervalVarContainer, "Contains"),
+ dir(pywrapcp.IntervalVarContainer),
+ )
+
+ def test_element(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntervalVarContainer, "Element"),
+ dir(pywrapcp.IntervalVarContainer),
+ )
+
+ def test_size(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntervalVarContainer, "Size"),
+ dir(pywrapcp.IntervalVarContainer),
+ )
+
+ def test_store(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntervalVarContainer, "Store"),
+ dir(pywrapcp.IntervalVarContainer),
+ )
+
+ def test_restore(self):
+ self.assertTrue(
+ hasattr(pywrapcp.IntervalVarContainer, "Restore"),
+ dir(pywrapcp.IntervalVarContainer),
+ )
class TestSequenceVarContainerAPI(absltest.TestCase):
- def test_contains(self):
- self.assertTrue(
- hasattr(pywrapcp.SequenceVarContainer, "Contains"),
- dir(pywrapcp.SequenceVarContainer),
- )
-
- def test_element(self):
- self.assertTrue(
- hasattr(pywrapcp.SequenceVarContainer, "Element"),
- dir(pywrapcp.SequenceVarContainer),
- )
-
- def test_size(self):
- self.assertTrue(
- hasattr(pywrapcp.SequenceVarContainer, "Size"),
- dir(pywrapcp.SequenceVarContainer),
- )
-
- def test_store(self):
- self.assertTrue(
- hasattr(pywrapcp.SequenceVarContainer, "Store"),
- dir(pywrapcp.SequenceVarContainer),
- )
-
- def test_restore(self):
- self.assertTrue(
- hasattr(pywrapcp.SequenceVarContainer, "Restore"),
- dir(pywrapcp.SequenceVarContainer),
- )
+ def test_contains(self):
+ self.assertTrue(
+ hasattr(pywrapcp.SequenceVarContainer, "Contains"),
+ dir(pywrapcp.SequenceVarContainer),
+ )
+
+ def test_element(self):
+ self.assertTrue(
+ hasattr(pywrapcp.SequenceVarContainer, "Element"),
+ dir(pywrapcp.SequenceVarContainer),
+ )
+
+ def test_size(self):
+ self.assertTrue(
+ hasattr(pywrapcp.SequenceVarContainer, "Size"),
+ dir(pywrapcp.SequenceVarContainer),
+ )
+
+ def test_store(self):
+ self.assertTrue(
+ hasattr(pywrapcp.SequenceVarContainer, "Store"),
+ dir(pywrapcp.SequenceVarContainer),
+ )
+
+ def test_restore(self):
+ self.assertTrue(
+ hasattr(pywrapcp.SequenceVarContainer, "Restore"),
+ dir(pywrapcp.SequenceVarContainer),
+ )
class PyWrapCPTest(absltest.TestCase):
- def testRabbitPheasant(self):
- # Create the solver.
- solver = pywrapcp.Solver("testRabbitPheasant")
-
- # Create the variables.
- pheasant = solver.IntVar(0, 100, "pheasant")
- rabbit = solver.IntVar(0, 100, "rabbit")
-
- # Create the constraints.
- solver.Add(pheasant + rabbit == 20)
- solver.Add(pheasant * 2 + rabbit * 4 == 56)
-
- # Create the search phase.
- db = solver.Phase(
- [rabbit, pheasant], solver.INT_VAR_DEFAULT, solver.ASSIGN_MIN_VALUE
- )
-
- # Create assignment
- solution = solver.Assignment()
- solution.Add(rabbit)
- solution.Add(pheasant)
-
- collector = solver.FirstSolutionCollector(solution)
-
- # And solve.
- solver.Solve(db, collector)
-
- self.assertEqual(1, collector.SolutionCount())
- current = collector.Solution(0)
-
- self.assertEqual(12, current.Value(pheasant))
- self.assertEqual(8, current.Value(rabbit))
-
- def testSolverParameters(self):
- # Create the parameters.
- params = pywrapcp.Solver.DefaultSolverParameters()
- self.assertIsInstance(params, solver_parameters_pb2.ConstraintSolverParameters)
- self.assertFalse(params.trace_propagation)
- params.trace_propagation = True
- self.assertTrue(params.trace_propagation)
-
- # Create the solver.
- solver = pywrapcp.Solver("testRabbitPheasantWithParameters", params)
- inside_params = solver.Parameters()
- self.assertTrue(inside_params.trace_propagation)
-
- def testSolverParametersFields(self):
- params = solver_parameters_pb2.ConstraintSolverParameters()
- bool_params = [
- "store_names",
- "name_cast_variables",
- "name_all_variables",
- "profile_propagation",
- "trace_propagation",
- "trace_search",
- "print_model",
- "print_model_stats",
- "print_added_constraints",
- "disable_solve",
- ]
- for p in bool_params:
- for v in [True, False]:
- setattr(params, p, v)
- self.assertEqual(getattr(params, p), v)
-
- int_params = ["trail_block_size", "array_split_size"]
- for p in int_params:
- for v in [10, 100]:
- setattr(params, p, v)
- self.assertEqual(getattr(params, p), v)
-
- string_params = ["profile_file"]
- for p in string_params:
- for v in ["", "tmp_file"]:
- setattr(params, p, v)
- self.assertEqual(getattr(params, p), v)
-
- def testIntVarAPI(self):
- # Create the solver.
- solver = pywrapcp.Solver("testIntVarAPI")
-
- c = solver.IntConst(3, "c")
- self.assertEqual(3, c.Min())
- self.assertEqual(3, c.Max())
- self.assertEqual(3, c.Value())
- self.assertTrue(c.Bound())
-
- b = solver.BoolVar("b")
- self.assertEqual(0, b.Min())
- self.assertEqual(1, b.Max())
-
- v1 = solver.IntVar(3, 10, "v1")
- self.assertEqual(3, v1.Min())
- self.assertEqual(10, v1.Max())
-
- v2 = solver.IntVar([1, 5, 3], "v2")
- self.assertEqual(1, v2.Min())
- self.assertEqual(5, v2.Max())
- self.assertEqual(3, v2.Size())
-
- # pylint: disable=too-many-statements
- def testIntegerArithmetic(self):
- solver = pywrapcp.Solver("testIntegerArithmetic")
-
- v1 = solver.IntVar(0, 10, "v1")
- v2 = solver.IntVar(0, 10, "v2")
- v3 = solver.IntVar(0, 10, "v3")
-
- e1 = v1 + v2
- e2 = v1 + 2
- e3 = solver.Sum([v1, v2, v3 * 3])
-
- e4 = v1 - 3
- e5 = v1 - v2
- e6 = -v1
-
- e7 = abs(e6)
- e8 = v3.Square()
-
- e9 = v1 * 3
- e10 = v1 * v2
-
- e11 = v2.IndexOf([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
- e11b = v2.IndexOf([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
- e12 = solver.Min(e1, e2)
- e13 = solver.Min(e3, 3)
- e14 = solver.Min([e1 + 1, e2 + 2, e3 + 3])
-
- e15 = solver.Max(e1, e2)
- e16 = solver.Max(e3, 3)
- e17 = solver.Max([e1 + 1, e2 + 2, e3 + 3])
-
- solver.Add(v1 == 1)
- solver.Add(v2 == 2)
- solver.Add(v3 == 3)
-
- db = solver.Phase([v1, v2, v3], solver.INT_VAR_DEFAULT, solver.ASSIGN_MIN_VALUE)
-
- solver.NewSearch(db)
- solver.NextSolution()
-
- self.assertEqual(1, v1.Value())
- self.assertEqual(2, v2.Value())
- self.assertEqual(3, v3.Value())
-
- self.assertEqual(3, e1.Min())
- self.assertEqual(3, e1.Max())
- self.assertEqual(3, e2.Min())
- self.assertEqual(3, e2.Max())
- self.assertEqual(12, e3.Min())
- self.assertEqual(12, e3.Max())
- self.assertEqual(-2, e4.Min())
- self.assertEqual(-2, e4.Max())
- self.assertEqual(-1, e5.Min())
- self.assertEqual(-1, e5.Max())
- self.assertEqual(-1, e6.Min())
- self.assertEqual(-1, e6.Max())
- self.assertEqual(1, e7.Min())
- self.assertEqual(1, e7.Max())
- self.assertEqual(9, e8.Min())
- self.assertEqual(9, e8.Max())
- self.assertEqual(3, e9.Min())
- self.assertEqual(3, e9.Max())
- self.assertEqual(2, e10.Min())
- self.assertEqual(2, e10.Max())
- self.assertEqual(4, e11.Min())
- self.assertEqual(4, e11.Max())
- self.assertEqual(4, e11b.Min())
- self.assertEqual(4, e11b.Max())
- self.assertEqual(3, e12.Min())
- self.assertEqual(3, e12.Max())
- self.assertEqual(3, e13.Min())
- self.assertEqual(3, e13.Max())
- self.assertEqual(4, e14.Min())
- self.assertEqual(4, e14.Max())
- self.assertEqual(3, e15.Min())
- self.assertEqual(3, e15.Max())
- self.assertEqual(12, e16.Min())
- self.assertEqual(12, e16.Max())
- self.assertEqual(15, e17.Min())
- self.assertEqual(15, e17.Max())
- solver.EndSearch()
-
- def testStatusVar(self):
- solver = pywrapcp.Solver("testStatusVar")
- v1 = solver.IntVar(0, 10, "v1")
- v2 = solver.IntVar(0, 10, "v2")
- c1 = v1 == 3
- c2 = v1 != 2
- print(c1)
- print(c1.Var())
- print(c2)
- print(c2.Var())
- e3 = v1 + c1
- print(e3)
- e4 = c1 + c2 == 1
- print(e4)
- e5 = solver.Min(c1, c2)
- print(e5)
- e6 = solver.Max([c1, c2, e3])
- print(e6)
- e7 = 1 + c2
- print(e7)
- e8 = solver.Max([v1 > 3, v1 <= 2, v2, v2 <= 0, v2 > 5])
- print(e8)
- e9 = solver.Min([v1 == v2, v1 != v2, v1 < v2, v1 > v2, v1 <= v2, v1 >= v2])
- print(e9)
-
- def testAllowedAssignment(self):
- solver = pywrapcp.Solver("testAllowedAssignment")
-
- v1 = solver.IntVar(0, 10, "v1")
- v2 = solver.IntVar(0, 10, "v2")
- v3 = solver.IntVar(0, 10, "v3")
-
- tuples = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)]
- dvars = [v1, v2, v3]
- solver.Add(solver.AllowedAssignments(dvars, tuples))
- db = solver.Phase(dvars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
-
- solver.NewSearch(db)
- counter = 0
- while solver.NextSolution():
- self.assertEqual(counter, v1.Value())
- self.assertEqual(counter, v2.Value())
- self.assertEqual(counter, v3.Value())
- counter += 1
- solver.EndSearch()
- self.assertEqual(5, counter)
-
- def testAllowedAssignment2(self):
- solver = pywrapcp.Solver("testAllowedAssignment")
-
- v1 = solver.IntVar(0, 10, "v1")
- v2 = solver.IntVar(0, 10, "v2")
- v3 = solver.IntVar(0, 10, "v3")
-
- dvars = [v1, v2, v3]
- solver.Add(solver.AllowedAssignments(dvars, [(x, x, x) for x in range(5)]))
- db = solver.Phase(dvars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
-
- solver.NewSearch(db)
- counter = 0
- while solver.NextSolution():
- self.assertEqual(counter, v1.Value())
- self.assertEqual(counter, v2.Value())
- self.assertEqual(counter, v3.Value())
- counter += 1
- solver.EndSearch()
- self.assertEqual(5, counter)
-
- def testIntExprToIntVarCast(self):
- solver = pywrapcp.Solver("testIntExprToIntVarCast")
-
- var1 = solver.IntVar(0, 10, "var1")
- var2 = solver.IntVar(0, 10, "var2")
- values = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
- # This test fails if the cast is not correctly done.
- expr = (var1 + var2).IndexOf(values)
- self.assertTrue(expr)
-
- def testIntExprToIntVarCastInSolution(self):
- solver = pywrapcp.Solver("testIntExprToIntVarCastInSolution")
-
- var1 = solver.IntVar(0, 10, "var1")
- var2 = solver.IntVar(0, 10, "var2")
- solution = solver.Assignment()
- expr = var1 + var2
- solution.Add(expr)
- solution.Store()
- # The next line fails if the cast is not correctly done.
- self.assertEqual(20, solution.Max(expr))
-
- def testIndexOf(self):
- solver = pywrapcp.Solver("element")
- index = solver.IntVar(0, 2, "index")
- element = index.IndexOf([1, 2, 3])
- self.assertEqual(1, element.Min())
- self.assertEqual(3, element.Max())
-
- def testElementFunction(self):
- solver = pywrapcp.Solver("element")
- index = solver.IntVar(0, 2, "index")
- element = solver.ElementFunction(inc_callback, index)
- self.assertEqual(1, element.Min())
- self.assertEqual(3, element.Max())
-
- def testElementMethod(self):
- solver = pywrapcp.Solver("element")
- index = solver.IntVar(0, 2, "index")
- class_callback = ClassIncCallback(2)
- class_method = class_callback.inc_method
- self.assertEqual(5, class_method(3))
- element = solver.ElementFunction(class_method, index)
- self.assertEqual(2, element.Min())
- self.assertEqual(4, element.Max())
-
- # TODO(user): better test all other ForwardSequence methods.
- def testForwardSequence(self):
- solver = pywrapcp.Solver("element")
- intervals = [
- solver.FixedDurationIntervalVar(0, 10, 5, False, "Youpi") for _ in range(10)
- ]
- disjunction = solver.DisjunctiveConstraint(intervals, "Blup")
- sequence = disjunction.SequenceVar()
- assignment = solver.Assignment()
- assignment.Add(sequence)
- assignment.SetForwardSequence(sequence, [1, 3, 5])
- self.assertListEqual(assignment.ForwardSequence(sequence), [1, 3, 5])
-
- def test_member(self):
- solver = pywrapcp.Solver("test member")
- x = solver.IntVar(1, 10, "x")
- ct = x.Member([1, 2, 3, 5])
- print("Constraint: {}".format(ct))
-
- def test_sparse_var(self):
- solver = pywrapcp.Solver("test sparse")
- x = solver.IntVar([1, 3, 5], "x")
- self.assertTrue(x.Contains(1))
- self.assertFalse(x.Contains(2))
- # print(x)
-
- def test_modulo(self):
- solver = pywrapcp.Solver("test modulo")
- x = solver.IntVar(0, 10, "x")
- y = solver.IntVar(2, 4, "y")
- print(x % 3)
- print(x % y)
-
- def test_modulo2(self):
- solver = pywrapcp.Solver("test modulo")
- x = solver.IntVar([-7, 7], "x")
- y = solver.IntVar([-4, 4], "y")
- z = (x % y).Var()
- t = (x // y).Var()
- db = solver.Phase([x, y], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- solver.NewSearch(db)
- while solver.NextSolution():
- print(
- "x = %d, y = %d, x %% y = %d, x div y = %d"
- % (x.Value(), y.Value(), z.Value(), t.Value())
- )
- solver.EndSearch()
-
- def test_limit(self):
- solver = pywrapcp.Solver("test limit")
- # limit_proto = solver.DefaultSearchLimitParameters()
- limit_proto = search_limit_pb2.RegularLimitParameters(time=10000, branches=10)
- print("limit proto: {}".format(limit_proto))
- limit = solver.Limit(limit_proto)
- print("limit: {}".format(limit))
-
- def test_export(self):
- solver = pywrapcp.Solver("test export")
- x = solver.IntVar(1, 10, "x")
- ct = x.Member([1, 2, 3, 5])
- solver.Add(ct)
- # proto = model_pb2.CpModel()
- # proto.model = 'wrong name'
- # solver.ExportModel(proto)
- # print(repr(proto))
- # print(str(proto))
-
- def test_size_1_var(self):
- solver = pywrapcp.Solver("test_size_1_var")
- x = solver.IntVar([0], "x")
- self.assertTrue(x.Contains(0))
- self.assertFalse(x.Contains(1))
-
- def test_cumulative_api(self):
- solver = pywrapcp.Solver("Problem")
-
- # Vars
- intervals = [
- solver.FixedDurationIntervalVar(0, 10, 5, False, "S_%s" % a)
- for a in range(10)
- ]
- demands = [a % 3 + 2 for a in range(10)]
- capacity = solver.IntVar(2, 5)
- solver.Add(solver.Cumulative(intervals, demands, capacity, "cumul"))
-
- def test_search_alldiff(self):
- solver = pywrapcp.Solver("test_search_alldiff")
- in_pos = [solver.IntVar(0, 7, "%i" % i) for i in range(8)]
- solver.Add(solver.AllDifferent(in_pos))
- aux_phase = solver.Phase(
- in_pos, solver.CHOOSE_LOWEST_MIN, solver.ASSIGN_MAX_VALUE
- )
- collector = solver.FirstSolutionCollector()
- for i in range(8):
- collector.Add(in_pos[i])
- solver.Solve(aux_phase, [collector])
- for i in range(8):
- print(collector.Value(0, in_pos[i]))
+ def testRabbitPheasant(self):
+ # Create the solver.
+ solver = pywrapcp.Solver("testRabbitPheasant")
+
+ # Create the variables.
+ pheasant = solver.IntVar(0, 100, "pheasant")
+ rabbit = solver.IntVar(0, 100, "rabbit")
+
+ # Create the constraints.
+ solver.Add(pheasant + rabbit == 20)
+ solver.Add(pheasant * 2 + rabbit * 4 == 56)
+
+ # Create the search phase.
+ db = solver.Phase(
+ [rabbit, pheasant], solver.INT_VAR_DEFAULT, solver.ASSIGN_MIN_VALUE
+ )
+
+ # Create assignment
+ solution = solver.Assignment()
+ solution.Add(rabbit)
+ solution.Add(pheasant)
+
+ collector = solver.FirstSolutionCollector(solution)
+
+ # And solve.
+ solver.Solve(db, collector)
+
+ self.assertEqual(1, collector.SolutionCount())
+ current = collector.Solution(0)
+
+ self.assertEqual(12, current.Value(pheasant))
+ self.assertEqual(8, current.Value(rabbit))
+
+ def testSolverParameters(self):
+ # Create the parameters.
+ params = pywrapcp.Solver.DefaultSolverParameters()
+ self.assertIsInstance(
+ params, solver_parameters_pb2.ConstraintSolverParameters
+ )
+ self.assertFalse(params.trace_propagation)
+ params.trace_propagation = True
+ self.assertTrue(params.trace_propagation)
+
+ # Create the solver.
+ solver = pywrapcp.Solver("testRabbitPheasantWithParameters", params)
+ inside_params = solver.Parameters()
+ self.assertTrue(inside_params.trace_propagation)
+
+ def testSolverParametersFields(self):
+ params = solver_parameters_pb2.ConstraintSolverParameters()
+ bool_params = [
+ "store_names",
+ "name_cast_variables",
+ "name_all_variables",
+ "profile_propagation",
+ "trace_propagation",
+ "trace_search",
+ "print_model",
+ "print_model_stats",
+ "print_added_constraints",
+ "disable_solve",
+ ]
+ for p in bool_params:
+ for v in [True, False]:
+ setattr(params, p, v)
+ self.assertEqual(getattr(params, p), v)
+
+ int_params = ["trail_block_size", "array_split_size"]
+ for p in int_params:
+ for v in [10, 100]:
+ setattr(params, p, v)
+ self.assertEqual(getattr(params, p), v)
+
+ string_params = ["profile_file"]
+ for p in string_params:
+ for v in ["", "tmp_file"]:
+ setattr(params, p, v)
+ self.assertEqual(getattr(params, p), v)
+
+ def testIntVarAPI(self):
+ # Create the solver.
+ solver = pywrapcp.Solver("testIntVarAPI")
+
+ c = solver.IntConst(3, "c")
+ self.assertEqual(3, c.Min())
+ self.assertEqual(3, c.Max())
+ self.assertEqual(3, c.Value())
+ self.assertTrue(c.Bound())
+
+ b = solver.BoolVar("b")
+ self.assertEqual(0, b.Min())
+ self.assertEqual(1, b.Max())
+
+ v1 = solver.IntVar(3, 10, "v1")
+ self.assertEqual(3, v1.Min())
+ self.assertEqual(10, v1.Max())
+
+ v2 = solver.IntVar([1, 5, 3], "v2")
+ self.assertEqual(1, v2.Min())
+ self.assertEqual(5, v2.Max())
+ self.assertEqual(3, v2.Size())
+
+ # pylint: disable=too-many-statements
+ def testIntegerArithmetic(self):
+ solver = pywrapcp.Solver("testIntegerArithmetic")
+
+ v1 = solver.IntVar(0, 10, "v1")
+ v2 = solver.IntVar(0, 10, "v2")
+ v3 = solver.IntVar(0, 10, "v3")
+
+ e1 = v1 + v2
+ e2 = v1 + 2
+ e3 = solver.Sum([v1, v2, v3 * 3])
+
+ e4 = v1 - 3
+ e5 = v1 - v2
+ e6 = -v1
+
+ e7 = abs(e6)
+ e8 = v3.Square()
+
+ e9 = v1 * 3
+ e10 = v1 * v2
+
+ e11 = v2.IndexOf([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
+ e11b = v2.IndexOf([0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20])
+ e12 = solver.Min(e1, e2)
+ e13 = solver.Min(e3, 3)
+ e14 = solver.Min([e1 + 1, e2 + 2, e3 + 3])
+
+ e15 = solver.Max(e1, e2)
+ e16 = solver.Max(e3, 3)
+ e17 = solver.Max([e1 + 1, e2 + 2, e3 + 3])
+
+ solver.Add(v1 == 1)
+ solver.Add(v2 == 2)
+ solver.Add(v3 == 3)
+
+ db = solver.Phase(
+ [v1, v2, v3], solver.INT_VAR_DEFAULT, solver.ASSIGN_MIN_VALUE
+ )
+
+ solver.NewSearch(db)
+ solver.NextSolution()
+
+ self.assertEqual(1, v1.Value())
+ self.assertEqual(2, v2.Value())
+ self.assertEqual(3, v3.Value())
+
+ self.assertEqual(3, e1.Min())
+ self.assertEqual(3, e1.Max())
+ self.assertEqual(3, e2.Min())
+ self.assertEqual(3, e2.Max())
+ self.assertEqual(12, e3.Min())
+ self.assertEqual(12, e3.Max())
+ self.assertEqual(-2, e4.Min())
+ self.assertEqual(-2, e4.Max())
+ self.assertEqual(-1, e5.Min())
+ self.assertEqual(-1, e5.Max())
+ self.assertEqual(-1, e6.Min())
+ self.assertEqual(-1, e6.Max())
+ self.assertEqual(1, e7.Min())
+ self.assertEqual(1, e7.Max())
+ self.assertEqual(9, e8.Min())
+ self.assertEqual(9, e8.Max())
+ self.assertEqual(3, e9.Min())
+ self.assertEqual(3, e9.Max())
+ self.assertEqual(2, e10.Min())
+ self.assertEqual(2, e10.Max())
+ self.assertEqual(4, e11.Min())
+ self.assertEqual(4, e11.Max())
+ self.assertEqual(4, e11b.Min())
+ self.assertEqual(4, e11b.Max())
+ self.assertEqual(3, e12.Min())
+ self.assertEqual(3, e12.Max())
+ self.assertEqual(3, e13.Min())
+ self.assertEqual(3, e13.Max())
+ self.assertEqual(4, e14.Min())
+ self.assertEqual(4, e14.Max())
+ self.assertEqual(3, e15.Min())
+ self.assertEqual(3, e15.Max())
+ self.assertEqual(12, e16.Min())
+ self.assertEqual(12, e16.Max())
+ self.assertEqual(15, e17.Min())
+ self.assertEqual(15, e17.Max())
+ solver.EndSearch()
+
+ def testStatusVar(self):
+ solver = pywrapcp.Solver("testStatusVar")
+ v1 = solver.IntVar(0, 10, "v1")
+ v2 = solver.IntVar(0, 10, "v2")
+ c1 = v1 == 3
+ c2 = v1 != 2
+ print(c1)
+ print(c1.Var())
+ print(c2)
+ print(c2.Var())
+ e3 = v1 + c1
+ print(e3)
+ e4 = c1 + c2 == 1
+ print(e4)
+ e5 = solver.Min(c1, c2)
+ print(e5)
+ e6 = solver.Max([c1, c2, e3])
+ print(e6)
+ e7 = 1 + c2
+ print(e7)
+ e8 = solver.Max([v1 > 3, v1 <= 2, v2, v2 <= 0, v2 > 5])
+ print(e8)
+ e9 = solver.Min([v1 == v2, v1 != v2, v1 < v2, v1 > v2, v1 <= v2, v1 >= v2])
+ print(e9)
+
+ def testAllowedAssignment(self):
+ solver = pywrapcp.Solver("testAllowedAssignment")
+
+ v1 = solver.IntVar(0, 10, "v1")
+ v2 = solver.IntVar(0, 10, "v2")
+ v3 = solver.IntVar(0, 10, "v3")
+
+ tuples = [(0, 0, 0), (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4)]
+ dvars = [v1, v2, v3]
+ solver.Add(solver.AllowedAssignments(dvars, tuples))
+ db = solver.Phase(
+ dvars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+
+ solver.NewSearch(db)
+ counter = 0
+ while solver.NextSolution():
+ self.assertEqual(counter, v1.Value())
+ self.assertEqual(counter, v2.Value())
+ self.assertEqual(counter, v3.Value())
+ counter += 1
+ solver.EndSearch()
+ self.assertEqual(5, counter)
+
+ def testAllowedAssignment2(self):
+ solver = pywrapcp.Solver("testAllowedAssignment")
+
+ v1 = solver.IntVar(0, 10, "v1")
+ v2 = solver.IntVar(0, 10, "v2")
+ v3 = solver.IntVar(0, 10, "v3")
+
+ dvars = [v1, v2, v3]
+ solver.Add(solver.AllowedAssignments(dvars, [(x, x, x) for x in range(5)]))
+ db = solver.Phase(
+ dvars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+
+ solver.NewSearch(db)
+ counter = 0
+ while solver.NextSolution():
+ self.assertEqual(counter, v1.Value())
+ self.assertEqual(counter, v2.Value())
+ self.assertEqual(counter, v3.Value())
+ counter += 1
+ solver.EndSearch()
+ self.assertEqual(5, counter)
+
+ def testIntExprToIntVarCast(self):
+ solver = pywrapcp.Solver("testIntExprToIntVarCast")
+
+ var1 = solver.IntVar(0, 10, "var1")
+ var2 = solver.IntVar(0, 10, "var2")
+ values = [1, 3, 5, 7, 9, 2, 4, 6, 8, 0]
+ # This test fails if the cast is not correctly done.
+ expr = (var1 + var2).IndexOf(values)
+ self.assertTrue(expr)
+
+ def testIntExprToIntVarCastInSolution(self):
+ solver = pywrapcp.Solver("testIntExprToIntVarCastInSolution")
+
+ var1 = solver.IntVar(0, 10, "var1")
+ var2 = solver.IntVar(0, 10, "var2")
+ solution = solver.Assignment()
+ expr = var1 + var2
+ solution.Add(expr)
+ solution.Store()
+ # The next line fails if the cast is not correctly done.
+ self.assertEqual(20, solution.Max(expr))
+
+ def testIndexOf(self):
+ solver = pywrapcp.Solver("element")
+ index = solver.IntVar(0, 2, "index")
+ element = index.IndexOf([1, 2, 3])
+ self.assertEqual(1, element.Min())
+ self.assertEqual(3, element.Max())
+
+ def testElementFunction(self):
+ solver = pywrapcp.Solver("element")
+ index = solver.IntVar(0, 2, "index")
+ element = solver.ElementFunction(inc_callback, index)
+ self.assertEqual(1, element.Min())
+ self.assertEqual(3, element.Max())
+
+ def testElementMethod(self):
+ solver = pywrapcp.Solver("element")
+ index = solver.IntVar(0, 2, "index")
+ class_callback = ClassIncCallback(2)
+ class_method = class_callback.inc_method
+ self.assertEqual(5, class_method(3))
+ element = solver.ElementFunction(class_method, index)
+ self.assertEqual(2, element.Min())
+ self.assertEqual(4, element.Max())
+
+ # TODO(user): better test all other ForwardSequence methods.
+ def testForwardSequence(self):
+ solver = pywrapcp.Solver("element")
+ intervals = [
+ solver.FixedDurationIntervalVar(0, 10, 5, False, "Youpi")
+ for _ in range(10)
+ ]
+ disjunction = solver.DisjunctiveConstraint(intervals, "Blup")
+ sequence = disjunction.SequenceVar()
+ assignment = solver.Assignment()
+ assignment.Add(sequence)
+ assignment.SetForwardSequence(sequence, [1, 3, 5])
+ self.assertListEqual(assignment.ForwardSequence(sequence), [1, 3, 5])
+
+ def test_member(self):
+ solver = pywrapcp.Solver("test member")
+ x = solver.IntVar(1, 10, "x")
+ ct = x.Member([1, 2, 3, 5])
+ print("Constraint: {}".format(ct))
+
+ def test_sparse_var(self):
+ solver = pywrapcp.Solver("test sparse")
+ x = solver.IntVar([1, 3, 5], "x")
+ self.assertTrue(x.Contains(1))
+ self.assertFalse(x.Contains(2))
+ # print(x)
+
+ def test_modulo(self):
+ solver = pywrapcp.Solver("test modulo")
+ x = solver.IntVar(0, 10, "x")
+ y = solver.IntVar(2, 4, "y")
+ print(x % 3)
+ print(x % y)
+
+ def test_modulo2(self):
+ solver = pywrapcp.Solver("test modulo")
+ x = solver.IntVar([-7, 7], "x")
+ y = solver.IntVar([-4, 4], "y")
+ z = (x % y).Var()
+ t = (x // y).Var()
+ db = solver.Phase(
+ [x, y], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ solver.NewSearch(db)
+ while solver.NextSolution():
+ print(
+ "x = %d, y = %d, x %% y = %d, x div y = %d"
+ % (x.Value(), y.Value(), z.Value(), t.Value())
+ )
+ solver.EndSearch()
+
+ def test_limit(self):
+ solver = pywrapcp.Solver("test limit")
+ # limit_proto = solver.DefaultSearchLimitParameters()
+ limit_proto = search_limit_pb2.RegularLimitParameters(
+ time=10000, branches=10
+ )
+ print("limit proto: {}".format(limit_proto))
+ limit = solver.Limit(limit_proto)
+ print("limit: {}".format(limit))
+
+ def test_export(self):
+ solver = pywrapcp.Solver("test export")
+ x = solver.IntVar(1, 10, "x")
+ ct = x.Member([1, 2, 3, 5])
+ solver.Add(ct)
+ # proto = model_pb2.CpModel()
+ # proto.model = 'wrong name'
+ # solver.ExportModel(proto)
+ # print(repr(proto))
+ # print(str(proto))
+
+ def test_size_1_var(self):
+ solver = pywrapcp.Solver("test_size_1_var")
+ x = solver.IntVar([0], "x")
+ self.assertTrue(x.Contains(0))
+ self.assertFalse(x.Contains(1))
+
+ def test_cumulative_api(self):
+ solver = pywrapcp.Solver("Problem")
+
+ # Vars
+ intervals = [
+ solver.FixedDurationIntervalVar(0, 10, 5, False, "S_%s" % a)
+ for a in range(10)
+ ]
+ demands = [a % 3 + 2 for a in range(10)]
+ capacity = solver.IntVar(2, 5)
+ solver.Add(solver.Cumulative(intervals, demands, capacity, "cumul"))
+
+ def test_search_alldiff(self):
+ solver = pywrapcp.Solver("test_search_alldiff")
+ in_pos = [solver.IntVar(0, 7, "%i" % i) for i in range(8)]
+ solver.Add(solver.AllDifferent(in_pos))
+ aux_phase = solver.Phase(
+ in_pos, solver.CHOOSE_LOWEST_MIN, solver.ASSIGN_MAX_VALUE
+ )
+ collector = solver.FirstSolutionCollector()
+ for i in range(8):
+ collector.Add(in_pos[i])
+ solver.Solve(aux_phase, [collector])
+ for i in range(8):
+ print(collector.Value(0, in_pos[i]))
class CustomSearchMonitor(pywrapcp.SearchMonitor):
- def __init__(self, solver, nexts):
- pywrapcp.SearchMonitor.__init__(self, solver)
- self._nexts = nexts
+ def __init__(self, solver, nexts):
+ pywrapcp.SearchMonitor.__init__(self, solver)
+ self._nexts = nexts
- def BeginInitialPropagation(self):
- print(self._nexts)
+ def BeginInitialPropagation(self):
+ print(self._nexts)
- def EndInitialPropagation(self):
- print(self._nexts)
+ def EndInitialPropagation(self):
+ print(self._nexts)
class SearchMonitorTest(absltest.TestCase):
- def test_search_monitor(self):
- print("test_search_monitor")
- solver = pywrapcp.Solver("test search monitor")
- x = solver.IntVar(1, 10, "x")
- ct = x == 3
- solver.Add(ct)
- db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- monitor = CustomSearchMonitor(solver, x)
- solver.Solve(db, monitor)
+ def test_search_monitor(self):
+ print("test_search_monitor")
+ solver = pywrapcp.Solver("test search monitor")
+ x = solver.IntVar(1, 10, "x")
+ ct = x == 3
+ solver.Add(ct)
+ db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
+ monitor = CustomSearchMonitor(solver, x)
+ solver.Solve(db, monitor)
class CustomDemon(pywrapcp.PyDemon):
- def __init__(self, x):
- super().__init__()
- self._x = x
- print("Demon built")
+ def __init__(self, x):
+ super().__init__()
+ self._x = x
+ print("Demon built")
- def Run(self, solver):
- print("in Run(), saw " + str(self._x))
+ def Run(self, solver):
+ print("in Run(), saw " + str(self._x))
class DemonTest(absltest.TestCase):
- def test_demon(self):
- print("test_demon")
- solver = pywrapcp.Solver("test export")
- x = solver.IntVar(1, 10, "x")
- demon = CustomDemon(x)
- demon.Run(solver)
+ def test_demon(self):
+ print("test_demon")
+ solver = pywrapcp.Solver("test export")
+ x = solver.IntVar(1, 10, "x")
+ demon = CustomDemon(x)
+ demon.Run(solver)
class CustomConstraint(pywrapcp.PyConstraint):
- def __init__(self, solver, x):
- super().__init__(solver)
- self._x = x
- print("Constraint built")
+ def __init__(self, solver, x):
+ super().__init__(solver)
+ self._x = x
+ print("Constraint built")
- def Post(self):
- print("in Post()", file=sys.stderr)
- self._demon = CustomDemon(self._x)
- self._x.WhenBound(self._demon)
- print("out of Post()", file=sys.stderr)
+ def Post(self):
+ print("in Post()", file=sys.stderr)
+ self._demon = CustomDemon(self._x)
+ self._x.WhenBound(self._demon)
+ print("out of Post()", file=sys.stderr)
- def InitialPropagate(self):
- print("in InitialPropagate()")
- self._x.SetMin(5)
- print(self._x)
- print("out of InitialPropagate()")
+ def InitialPropagate(self):
+ print("in InitialPropagate()")
+ self._x.SetMin(5)
+ print(self._x)
+ print("out of InitialPropagate()")
- def DebugString(self):
- return "CustomConstraint"
+ def DebugString(self):
+ return "CustomConstraint"
class InitialPropagateDemon(pywrapcp.PyDemon):
- def __init__(self, constraint):
- super().__init__()
- self._ct = constraint
+ def __init__(self, constraint):
+ super().__init__()
+ self._ct = constraint
- def Run(self, solver):
- self._ct.InitialPropagate()
+ def Run(self, solver):
+ self._ct.InitialPropagate()
class DumbGreaterOrEqualToFive(pywrapcp.PyConstraint):
- def __init__(self, solver, x):
- super().__init__(solver)
- self._x = x
+ def __init__(self, solver, x):
+ super().__init__(solver)
+ self._x = x
- def Post(self):
- self._demon = InitialPropagateDemon(self)
- self._x.WhenBound(self._demon)
+ def Post(self):
+ self._demon = InitialPropagateDemon(self)
+ self._x.WhenBound(self._demon)
- def InitialPropagate(self):
- if self._x.Bound():
- if self._x.Value() < 5:
- print("Reject %d" % self._x.Value(), file=sys.stderr)
- self.solver().Fail()
- else:
- print("Accept %d" % self._x.Value(), file=sys.stderr)
+ def InitialPropagate(self):
+ if self._x.Bound():
+ if self._x.Value() < 5:
+ print("Reject %d" % self._x.Value(), file=sys.stderr)
+ self.solver().Fail()
+ else:
+ print("Accept %d" % self._x.Value(), file=sys.stderr)
class WatchDomain(pywrapcp.PyDemon):
- def __init__(self, x):
- super().__init__()
- self._x = x
+ def __init__(self, x):
+ super().__init__()
+ self._x = x
- def Run(self, solver):
- for i in self._x.HoleIterator():
- print("Removed %d" % i)
+ def Run(self, solver):
+ for i in self._x.HoleIterator():
+ print("Removed %d" % i)
class HoleConstraint(pywrapcp.PyConstraint):
- def __init__(self, solver, x):
- super().__init__(solver)
- self._x = x
+ def __init__(self, solver, x):
+ super().__init__(solver)
+ self._x = x
- def Post(self):
- self._demon = WatchDomain(self._x)
- self._x.WhenDomain(self._demon)
+ def Post(self):
+ self._demon = WatchDomain(self._x)
+ self._x.WhenDomain(self._demon)
- def InitialPropagate(self):
- self._x.RemoveValue(5)
+ def InitialPropagate(self):
+ self._x.RemoveValue(5)
class BinarySum(pywrapcp.PyConstraint):
- def __init__(self, solver, x, y, z):
- super().__init__(solver)
- self._x = x
- self._y = y
- self._z = z
-
- def Post(self):
- self._demon = InitialPropagateDemon(self)
- self._x.WhenRange(self._demon)
- self._y.WhenRange(self._demon)
- self._z.WhenRange(self._demon)
-
- def InitialPropagate(self):
- self._z.SetRange(self._x.Min() + self._y.Min(), self._x.Max() + self._y.Max())
- self._x.SetRange(self._z.Min() - self._y.Max(), self._z.Max() - self._y.Min())
- self._y.SetRange(self._z.Min() - self._x.Max(), self._z.Max() - self._x.Min())
+ def __init__(self, solver, x, y, z):
+ super().__init__(solver)
+ self._x = x
+ self._y = y
+ self._z = z
+
+ def Post(self):
+ self._demon = InitialPropagateDemon(self)
+ self._x.WhenRange(self._demon)
+ self._y.WhenRange(self._demon)
+ self._z.WhenRange(self._demon)
+
+ def InitialPropagate(self):
+ self._z.SetRange(
+ self._x.Min() + self._y.Min(), self._x.Max() + self._y.Max()
+ )
+ self._x.SetRange(
+ self._z.Min() - self._y.Max(), self._z.Max() - self._y.Min()
+ )
+ self._y.SetRange(
+ self._z.Min() - self._x.Max(), self._z.Max() - self._x.Min()
+ )
class ConstraintTest(absltest.TestCase):
- def test_member(self):
- print("test_member")
- solver = pywrapcp.Solver("test member")
- x = solver.IntVar(1, 10, "x")
- constraint = x.Member([1, 2, 3, 5])
- print(constraint)
-
- def test_sparse_var(self):
- print("test_sparse_var")
- solver = pywrapcp.Solver("test_sparse_var")
- x = solver.IntVar([1, 3, 5], "x")
- print(x)
-
- def test_modulo(self):
- print("test_modulo")
- solver = pywrapcp.Solver("test_modulo")
- x = solver.IntVar(0, 10, "x")
- y = solver.IntVar(2, 4, "y")
- print(x % 3)
- print(x % y)
-
- def test_limit(self):
- solver = pywrapcp.Solver("test_limit")
- # TODO(user): expose the proto-based MakeLimit() API in or-tools and test it
- # here.
- time = 10000 # ms
- branches = 10
- failures = sys.maxsize
- solutions = sys.maxsize
- smart_time_check = True
- cumulative = False
- limit = solver.Limit(
- time, branches, failures, solutions, smart_time_check, cumulative
- )
- print(limit)
-
- def test_search_monitor(self):
- print("test_search_monitor")
- solver = pywrapcp.Solver("test search_monitor")
- x = solver.IntVar(1, 10, "x")
- ct = x == 3
- solver.Add(ct)
- db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- monitor = CustomSearchMonitor(solver, x)
- solver.Solve(db, monitor)
-
- def test_constraint(self):
- print("test_constraint")
- solver = pywrapcp.Solver("test_constraint")
- x = solver.IntVar(1, 10, "x")
- myct = CustomConstraint(solver, x)
- solver.Add(myct)
- db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- solver.Solve(db)
-
- def test_failing_constraint(self):
- print("test_failing_constraint")
- solver = pywrapcp.Solver("test failing constraint")
- x = solver.IntVar(1, 10, "x")
- myct = DumbGreaterOrEqualToFive(solver, x)
- solver.Add(myct)
- db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- solver.Solve(db)
-
- def test_domain_iterator(self):
- print("test_domain_iterator")
- solver = pywrapcp.Solver("test_domain_iterator")
- x = solver.IntVar([1, 2, 4, 6], "x")
- for i in x.DomainIterator():
- print(i)
-
- def test_hole_iterator(self):
- print("test_hole_iterator")
- solver = pywrapcp.Solver("test_hole_iterator")
- x = solver.IntVar(1, 10, "x")
- myct = HoleConstraint(solver, x)
- solver.Add(myct)
- db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- solver.Solve(db)
-
- def test_sum_constraint(self):
- print("test_sum_constraint")
- solver = pywrapcp.Solver("test_sum_constraint")
- x = solver.IntVar(1, 5, "x")
- y = solver.IntVar(1, 5, "y")
- z = solver.IntVar(1, 5, "z")
- binary_sum = BinarySum(solver, x, y, z)
- solver.Add(binary_sum)
- db = solver.Phase(
- [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
- )
- solver.NewSearch(db)
- while solver.NextSolution():
- print("%d + %d == %d" % (x.Value(), y.Value(), z.Value()))
- solver.EndSearch()
+ def test_member(self):
+ print("test_member")
+ solver = pywrapcp.Solver("test member")
+ x = solver.IntVar(1, 10, "x")
+ constraint = x.Member([1, 2, 3, 5])
+ print(constraint)
+
+ def test_sparse_var(self):
+ print("test_sparse_var")
+ solver = pywrapcp.Solver("test_sparse_var")
+ x = solver.IntVar([1, 3, 5], "x")
+ print(x)
+
+ def test_modulo(self):
+ print("test_modulo")
+ solver = pywrapcp.Solver("test_modulo")
+ x = solver.IntVar(0, 10, "x")
+ y = solver.IntVar(2, 4, "y")
+ print(x % 3)
+ print(x % y)
+
+ def test_limit(self):
+ solver = pywrapcp.Solver("test_limit")
+ # TODO(user): expose the proto-based MakeLimit() API in or-tools and test it
+ # here.
+ time = 10000 # ms
+ branches = 10
+ failures = sys.maxsize
+ solutions = sys.maxsize
+ smart_time_check = True
+ cumulative = False
+ limit = solver.Limit(
+ time, branches, failures, solutions, smart_time_check, cumulative
+ )
+ print(limit)
+
+ def test_search_monitor(self):
+ print("test_search_monitor")
+ solver = pywrapcp.Solver("test search_monitor")
+ x = solver.IntVar(1, 10, "x")
+ ct = x == 3
+ solver.Add(ct)
+ db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
+ monitor = CustomSearchMonitor(solver, x)
+ solver.Solve(db, monitor)
+
+ def test_constraint(self):
+ print("test_constraint")
+ solver = pywrapcp.Solver("test_constraint")
+ x = solver.IntVar(1, 10, "x")
+ myct = CustomConstraint(solver, x)
+ solver.Add(myct)
+ db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
+ solver.Solve(db)
+
+ def test_failing_constraint(self):
+ print("test_failing_constraint")
+ solver = pywrapcp.Solver("test failing constraint")
+ x = solver.IntVar(1, 10, "x")
+ myct = DumbGreaterOrEqualToFive(solver, x)
+ solver.Add(myct)
+ db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
+ solver.Solve(db)
+
+ def test_domain_iterator(self):
+ print("test_domain_iterator")
+ solver = pywrapcp.Solver("test_domain_iterator")
+ x = solver.IntVar([1, 2, 4, 6], "x")
+ for i in x.DomainIterator():
+ print(i)
+
+ def test_hole_iterator(self):
+ print("test_hole_iterator")
+ solver = pywrapcp.Solver("test_hole_iterator")
+ x = solver.IntVar(1, 10, "x")
+ myct = HoleConstraint(solver, x)
+ solver.Add(myct)
+ db = solver.Phase([x], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
+ solver.Solve(db)
+
+ def test_sum_constraint(self):
+ print("test_sum_constraint")
+ solver = pywrapcp.Solver("test_sum_constraint")
+ x = solver.IntVar(1, 5, "x")
+ y = solver.IntVar(1, 5, "y")
+ z = solver.IntVar(1, 5, "z")
+ binary_sum = BinarySum(solver, x, y, z)
+ solver.Add(binary_sum)
+ db = solver.Phase(
+ [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ solver.NewSearch(db)
+ while solver.NextSolution():
+ print("%d + %d == %d" % (x.Value(), y.Value(), z.Value()))
+ solver.EndSearch()
class CustomDecisionBuilder(pywrapcp.PyDecisionBuilder):
- def __init__(self):
- super().__init__()
- self._counter = 0
+ def __init__(self):
+ super().__init__()
+ self._counter = 0
- def Next(self, solver):
- print("In Next", file=sys.stderr)
- self._counter += 1
- return None
+ def Next(self, solver):
+ print("In Next", file=sys.stderr)
+ self._counter += 1
+ return None
- def DebugString(self):
- return "CustomDecisionBuilder"
+ def DebugString(self):
+ return "CustomDecisionBuilder"
class CustomDecision(pywrapcp.PyDecision):
- def __init__(self):
- print("In CustomDecision ctor", file=sys.stderr)
- super().__init__()
- self._val = 1
- print("Set value to", self._val, file=sys.stderr)
+ def __init__(self):
+ print("In CustomDecision ctor", file=sys.stderr)
+ super().__init__()
+ self._val = 1
+ print("Set value to", self._val, file=sys.stderr)
- def Apply(self, solver):
- print("In CustomDecision.Apply()", file=sys.stderr)
- print("Expect value", self._val, file=sys.stderr)
- solver.Fail()
+ def Apply(self, solver):
+ print("In CustomDecision.Apply()", file=sys.stderr)
+ print("Expect value", self._val, file=sys.stderr)
+ solver.Fail()
- def Refute(self, solver):
- print("In CustomDecision.Refute()", file=sys.stderr)
+ def Refute(self, solver):
+ print("In CustomDecision.Refute()", file=sys.stderr)
- def DebugString(self):
- return "CustomDecision"
+ def DebugString(self):
+ return "CustomDecision"
class CustomDecisionBuilderCustomDecision(pywrapcp.PyDecisionBuilder):
- def __init__(self):
- super().__init__()
- self.__done = False
- self._counter = 0
+ def __init__(self):
+ super().__init__()
+ self.__done = False
+ self._counter = 0
- def Next(self, solver):
- print("In CustomDecisionBuilderCustomDecision.Next()", file=sys.stderr)
- self._counter += 1
- if not self.__done:
- self.__done = True
- self.__decision = CustomDecision()
- return self.__decision
- return None
+ def Next(self, solver):
+ print("In CustomDecisionBuilderCustomDecision.Next()", file=sys.stderr)
+ self._counter += 1
+ if not self.__done:
+ self.__done = True
+ self.__decision = CustomDecision()
+ return self.__decision
+ return None
- def DebugString(self):
- return "CustomDecisionBuilderCustomDecision"
+ def DebugString(self):
+ return "CustomDecisionBuilderCustomDecision"
class DecisionTest(absltest.TestCase):
- def test_custom_decision_builder(self):
- solver = pywrapcp.Solver("test_custom_decision_builder")
- db = CustomDecisionBuilder()
- print(str(db))
- solver.Solve(db)
- self.assertEqual(db._counter, 1)
+ def test_custom_decision_builder(self):
+ solver = pywrapcp.Solver("test_custom_decision_builder")
+ db = CustomDecisionBuilder()
+ print(str(db))
+ solver.Solve(db)
+ self.assertEqual(db._counter, 1)
- def test_custom_decision(self):
- solver = pywrapcp.Solver("test_custom_decision")
- db = CustomDecisionBuilderCustomDecision()
- print(str(db))
- solver.Solve(db)
- self.assertEqual(db._counter, 2)
+ def test_custom_decision(self):
+ solver = pywrapcp.Solver("test_custom_decision")
+ db = CustomDecisionBuilderCustomDecision()
+ print(str(db))
+ solver.Solve(db)
+ self.assertEqual(db._counter, 2)
class LocalSearchTest(absltest.TestCase):
- class OneVarLNS(pywrapcp.BaseLns):
- """One Var LNS."""
-
- def __init__(self, int_vars):
- super().__init__(int_vars)
- self.__index = 0
-
- def InitFragments(self):
- print("OneVarLNS.InitFragments()...", file=sys.stderr)
- self.__index = 0
-
- def NextFragment(self):
- print("OneVarLNS.NextFragment()...", file=sys.stderr)
- if self.__index < self.Size():
- self.AppendToFragment(self.__index)
- self.__index += 1
- return True
- else:
- return False
-
- class MoveOneVar(pywrapcp.IntVarLocalSearchOperator):
- """Move one var up or down."""
-
- def __init__(self, int_vars):
- super().__init__(int_vars)
- self.__index = 0
- self.__up = False
-
- def OneNeighbor(self):
- print("MoveOneVar.OneNeighbor()...", file=sys.stderr)
- current_value = self.OldValue(self.__index)
- if self.__up:
- self.SetValue(self.__index, current_value + 1)
- self.__index = (self.__index + 1) % self.Size()
- else:
- self.SetValue(self.__index, current_value - 1)
- self.__up = not self.__up
- return True
-
- def OnStart(self):
- print("MoveOneVar.OnStart()...", file=sys.stderr)
- pass
-
- def IsIncremental(self):
- return False
-
- class SumFilter(pywrapcp.IntVarLocalSearchFilter):
- """Filter to speed up LS computation."""
-
- def __init__(self, int_vars):
- super().__init__(int_vars)
- self.__sum = 0
-
- def OnSynchronize(self, delta):
- self.__sum = sum(self.Value(index) for index in range(self.Size()))
-
- def Accept(
- self,
- delta,
- unused_delta_delta,
- unused_objective_min,
- unused_objective_max,
- ):
- solution_delta = delta.IntVarContainer()
- solution_delta_size = solution_delta.Size()
- for i in range(solution_delta_size):
- if not solution_delta.Element(i).Activated():
- return True
- new_sum = self.__sum
- for i in range(solution_delta_size):
- element = solution_delta.Element(i)
- int_var = element.Var()
- touched_var_index = self.IndexFromVar(int_var)
- old_value = self.Value(touched_var_index)
- new_value = element.Value()
- new_sum += new_value - old_value
-
- return new_sum < self.__sum
-
- def IsIncremental(self):
- return False
-
- def solve(self, local_search_type):
- solver = pywrapcp.Solver("Solve")
- int_vars = [solver.IntVar(0, 4) for _ in range(4)]
- sum_var = solver.Sum(int_vars).Var()
- objective = solver.Minimize(sum_var, 1)
- inner_db = solver.Phase(
- int_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MAX_VALUE
- )
- if local_search_type == 0: # LNS
- print("Large Neighborhood Search", file=sys.stderr)
- one_var_lns = self.OneVarLNS(int_vars)
- ls_params = solver.LocalSearchPhaseParameters(
- sum_var, one_var_lns, inner_db
- )
- ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
- elif local_search_type == 1: # LS
- print("Local Search", file=sys.stderr)
- move_one_var = self.MoveOneVar(int_vars)
- ls_params = solver.LocalSearchPhaseParameters(
- sum_var, move_one_var, inner_db
- )
- ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
- else:
- print("Local Search with Filter", file=sys.stderr)
- move_one_var = self.MoveOneVar(int_vars)
- sum_filter = self.SumFilter(int_vars)
- filter_manager = pywrapcp.LocalSearchFilterManager([sum_filter])
- ls_params = solver.LocalSearchPhaseParameters(
- sum_var, move_one_var, inner_db, None, filter_manager
- )
- ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
-
- collector = solver.LastSolutionCollector()
- collector.Add(int_vars)
- collector.AddObjective(sum_var)
- log = solver.SearchLog(1000, objective)
- solver.Solve(ls, [collector, objective, log])
- print("Objective value = %d" % collector.ObjectiveValue(0), file=sys.stderr)
-
- def test_large_neighborhood_search(self):
- self.solve(0)
-
- def test_local_search(self):
- self.solve(1)
-
- def test_local_search_with_filter(self):
- self.solve(2)
+ class OneVarLNS(pywrapcp.BaseLns):
+ """One Var LNS."""
+
+ def __init__(self, int_vars):
+ super().__init__(int_vars)
+ self.__index = 0
+
+ def InitFragments(self):
+ print("OneVarLNS.InitFragments()...", file=sys.stderr)
+ self.__index = 0
+
+ def NextFragment(self):
+ print("OneVarLNS.NextFragment()...", file=sys.stderr)
+ if self.__index < self.Size():
+ self.AppendToFragment(self.__index)
+ self.__index += 1
+ return True
+ else:
+ return False
+
+ class MoveOneVar(pywrapcp.IntVarLocalSearchOperator):
+ """Move one var up or down."""
+
+ def __init__(self, int_vars):
+ super().__init__(int_vars)
+ self.__index = 0
+ self.__up = False
+
+ def OneNeighbor(self):
+ print("MoveOneVar.OneNeighbor()...", file=sys.stderr)
+ current_value = self.OldValue(self.__index)
+ if self.__up:
+ self.SetValue(self.__index, current_value + 1)
+ self.__index = (self.__index + 1) % self.Size()
+ else:
+ self.SetValue(self.__index, current_value - 1)
+ self.__up = not self.__up
+ return True
+
+ def OnStart(self):
+ print("MoveOneVar.OnStart()...", file=sys.stderr)
+ pass
+
+ def IsIncremental(self):
+ return False
+
+ class SumFilter(pywrapcp.IntVarLocalSearchFilter):
+ """Filter to speed up LS computation."""
+
+ def __init__(self, int_vars):
+ super().__init__(int_vars)
+ self.__sum = 0
+
+ def OnSynchronize(self, delta):
+ self.__sum = sum(self.Value(index) for index in range(self.Size()))
+
+ def Accept(
+ self,
+ delta,
+ unused_delta_delta,
+ unused_objective_min,
+ unused_objective_max,
+ ):
+ solution_delta = delta.IntVarContainer()
+ solution_delta_size = solution_delta.Size()
+ for i in range(solution_delta_size):
+ if not solution_delta.Element(i).Activated():
+ return True
+ new_sum = self.__sum
+ for i in range(solution_delta_size):
+ element = solution_delta.Element(i)
+ int_var = element.Var()
+ touched_var_index = self.IndexFromVar(int_var)
+ old_value = self.Value(touched_var_index)
+ new_value = element.Value()
+ new_sum += new_value - old_value
+
+ return new_sum < self.__sum
+
+ def IsIncremental(self):
+ return False
+
+ def solve(self, local_search_type):
+ solver = pywrapcp.Solver("Solve")
+ int_vars = [solver.IntVar(0, 4) for _ in range(4)]
+ sum_var = solver.Sum(int_vars).Var()
+ objective = solver.Minimize(sum_var, 1)
+ inner_db = solver.Phase(
+ int_vars, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MAX_VALUE
+ )
+ if local_search_type == 0: # LNS
+ print("Large Neighborhood Search", file=sys.stderr)
+ one_var_lns = self.OneVarLNS(int_vars)
+ ls_params = solver.LocalSearchPhaseParameters(
+ sum_var, one_var_lns, inner_db
+ )
+ ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
+ elif local_search_type == 1: # LS
+ print("Local Search", file=sys.stderr)
+ move_one_var = self.MoveOneVar(int_vars)
+ ls_params = solver.LocalSearchPhaseParameters(
+ sum_var, move_one_var, inner_db
+ )
+ ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
+ else:
+ print("Local Search with Filter", file=sys.stderr)
+ move_one_var = self.MoveOneVar(int_vars)
+ sum_filter = self.SumFilter(int_vars)
+ filter_manager = pywrapcp.LocalSearchFilterManager([sum_filter])
+ ls_params = solver.LocalSearchPhaseParameters(
+ sum_var, move_one_var, inner_db, None, filter_manager
+ )
+ ls = solver.LocalSearchPhase(int_vars, inner_db, ls_params)
+
+ collector = solver.LastSolutionCollector()
+ collector.Add(int_vars)
+ collector.AddObjective(sum_var)
+ log = solver.SearchLog(1000, objective)
+ solver.Solve(ls, [collector, objective, log])
+ print("Objective value = %d" % collector.ObjectiveValue(0), file=sys.stderr)
+
+ def test_large_neighborhood_search(self):
+ self.solve(0)
+
+ def test_local_search(self):
+ self.solve(1)
+
+ def test_local_search_with_filter(self):
+ self.solve(2)
class MyDecisionBuilder(pywrapcp.PyDecisionBuilder):
- def __init__(self, var, value):
- super().__init__()
- self.__var = var
- self.__value = value
+ def __init__(self, var, value):
+ super().__init__()
+ self.__var = var
+ self.__value = value
- def Next(self, solver):
- if not self.__var.Bound():
- decision = solver.AssignVariableValue(self.__var, self.__value)
- return decision
+ def Next(self, solver):
+ if not self.__var.Bound():
+ decision = solver.AssignVariableValue(self.__var, self.__value)
+ return decision
class MyLns(pywrapcp.BaseLns):
- def __init__(self, int_vars):
- super().__init__(int_vars)
- self.__current = 0
+ def __init__(self, int_vars):
+ super().__init__(int_vars)
+ self.__current = 0
- def InitFragments(self):
- self.__current = 0
+ def InitFragments(self):
+ self.__current = 0
- def NextFragment(self, fragment, values):
- while self.__current < len(values):
- if values[self.__current] == 1:
- fragment.append(self.__current)
- self.__current += 1
- return True
- else:
- self.__current += 1
+ def NextFragment(self, fragment, values):
+ while self.__current < len(values):
+ if values[self.__current] == 1:
+ fragment.append(self.__current)
+ self.__current += 1
+ return True
+ else:
+ self.__current += 1
class MyLnsNoValues(pywrapcp.BaseLns):
- def __init__(self, int_vars):
- super().__init__(int_vars)
- self.__current = 0
- self.__size = len(int_vars)
+ def __init__(self, int_vars):
+ super().__init__(int_vars)
+ self.__current = 0
+ self.__size = len(int_vars)
- def InitFragments(self):
- self.__current = 0
+ def InitFragments(self):
+ self.__current = 0
- def NextFragment(self, fragment):
- while self.__current < self.__size:
- fragment.append(self.__current)
- self.__current += 1
- return True
+ def NextFragment(self, fragment):
+ while self.__current < self.__size:
+ fragment.append(self.__current)
+ self.__current += 1
+ return True
class MyDecisionBuilderWithRev(pywrapcp.PyDecisionBuilder):
- def __init__(self, var, value, rev):
- super().__init__()
- self.__var = var
- self.__value = value
- self.__rev = rev
+ def __init__(self, var, value, rev):
+ super().__init__()
+ self.__var = var
+ self.__value = value
+ self.__rev = rev
- def Next(self, solver):
- if not self.__var.Bound():
- if self.__var.Contains(self.__value):
- decision = solver.AssignVariableValue(self.__var, self.__value)
- self.__rev.SetValue(solver, self.__value)
- return decision
- else:
- return solver.FailDecision()
+ def Next(self, solver):
+ if not self.__var.Bound():
+ if self.__var.Contains(self.__value):
+ decision = solver.AssignVariableValue(self.__var, self.__value)
+ self.__rev.SetValue(solver, self.__value)
+ return decision
+ else:
+ return solver.FailDecision()
class MyDecisionBuilderThatFailsWithRev(pywrapcp.PyDecisionBuilder):
- def Next(self, solver):
- solver.Fail()
- return None
+ def Next(self, solver):
+ solver.Fail()
+ return None
class PyWrapCPSearchTest(absltest.TestCase):
- NUMBER_OF_VARIABLES = 10
- VARIABLE_MIN = 0
- VARIABLE_MAX = 10
- LNS_NEIGHBORS = 100
- LNS_VARIABLES = 4
- DECISION_BUILDER_VALUE = 5
- OTHER_DECISION_BUILDER_VALUE = 2
-
- def testNewClassAsDecisionBuilder(self):
- solver = pywrapcp.Solver("testNewClassAsDecisionBuilder")
- x = solver.IntVar(self.VARIABLE_MIN, self.VARIABLE_MAX, "x")
- phase = MyDecisionBuilder(x, self.DECISION_BUILDER_VALUE)
- solver.NewSearch(phase)
- solver.NextSolution()
- self.assertTrue(x.Bound())
- self.assertEqual(self.DECISION_BUILDER_VALUE, x.Min())
- solver.EndSearch()
-
- def testComposeTwoDecisions(self):
- solver = pywrapcp.Solver("testNewClassAsDecisionBuilder")
- x = solver.IntVar(0, 10, "x")
- y = solver.IntVar(0, 10, "y")
- phase_x = MyDecisionBuilder(x, self.DECISION_BUILDER_VALUE)
- phase_y = MyDecisionBuilder(y, self.OTHER_DECISION_BUILDER_VALUE)
- phase = solver.Compose([phase_x, phase_y])
- solver.NewSearch(phase)
- solver.NextSolution()
- self.assertTrue(x.Bound())
- self.assertEqual(self.DECISION_BUILDER_VALUE, x.Min())
- self.assertTrue(y.Bound())
- self.assertEqual(self.OTHER_DECISION_BUILDER_VALUE, y.Min())
- solver.EndSearch()
-
- def testRandomLns(self):
- solver = pywrapcp.Solver("testRandomLnsOperator")
- x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
- lns = solver.RandomLnsOperator(x, self.LNS_VARIABLES)
- delta = solver.Assignment()
- for _ in range(self.LNS_NEIGHBORS):
- delta.Clear()
- self.assertTrue(lns.NextNeighbor(delta, delta))
- self.assertLess(0, delta.Size())
- self.assertGreater(self.LNS_VARIABLES + 1, delta.Size())
-
- def testCallbackLns(self):
- solver = pywrapcp.Solver("testCallbackLNS")
- x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
- lns = MyLns(x)
- solution = solver.Assignment()
- solution.Add(x)
- for v in x:
- solution.SetValue(v, 1)
- obj_var = solver.Sum(x)
- objective = solver.Minimize(obj_var, 1)
- collector = solver.LastSolutionCollector(solution)
- inner_db = solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
-
- ls_params = solver.LocalSearchPhaseParameters(obj_var.Var(), lns, inner_db)
- ls = solver.LocalSearchPhase(x, inner_db, ls_params)
- log = solver.SearchLog(1000, objective)
- solver.Solve(ls, [collector, objective, log])
- for v in x:
- self.assertEqual(0, collector.Solution(0).Value(v))
-
- def testCallbackLnsNoValues(self):
- solver = pywrapcp.Solver("testCallbackLnsNoValues")
- x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
- lns = MyLnsNoValues(x)
- solution = solver.Assignment()
- solution.Add(x)
- for v in x:
- solution.SetValue(v, 1)
- obj_var = solver.Sum(x)
- objective = solver.Minimize(obj_var, 1)
- collector = solver.LastSolutionCollector(solution)
- inner_db = solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
-
- ls_params = solver.LocalSearchPhaseParameters(obj_var.Var(), lns, inner_db)
- db = solver.LocalSearchPhase(x, inner_db, ls_params)
- log = solver.SearchLog(1000, objective)
- solver.Solve(db, [collector, objective, log])
- for v in x:
- self.assertEqual(0, collector.Solution(0).Value(v))
-
- def testConcatenateOperators(self):
- solver = pywrapcp.Solver("testConcatenateOperators")
- x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
- op1 = solver.Operator(x, solver.INCREMENT)
- op2 = solver.Operator(x, solver.DECREMENT)
- concatenate = solver.ConcatenateOperators([op1, op2])
- solution = solver.Assignment()
- solution.Add(x)
- for v in x:
- solution.SetValue(v, 1)
- obj_var = solver.Sum(x)
- objective = solver.Minimize(obj_var, 1)
- collector = solver.LastSolutionCollector(solution)
- ls_params = solver.LocalSearchPhaseParameters(obj_var.Var(), concatenate, None)
- db = solver.LocalSearchPhase(solution, ls_params)
- solver.Solve(db, [objective, collector])
- for v in x:
- self.assertEqual(0, collector.Solution(0).Value(v))
-
- def testRevIntegerOutsideSearch(self):
- solver = pywrapcp.Solver("testRevValue")
- revx = pywrapcp.RevInteger(12)
- self.assertEqual(12, revx.Value())
- revx.SetValue(solver, 25)
- self.assertEqual(25, revx.Value())
-
- def testMemberApi(self):
- solver = pywrapcp.Solver("testMemberApi")
- x = solver.IntVar(0, 10, "x")
- c1 = solver.MemberCt(x, [2, 5])
- c2 = x.Member([2, 5])
- self.assertEqual(str(c1), str(c2))
- c3 = solver.NotMemberCt(x, [2, 7], [4, 9])
- c4 = x.NotMember([2, 7], [4, 9])
- self.assertEqual(str(c3), str(c4))
-
- def testRevIntegerInSearch(self):
- solver = pywrapcp.Solver("testRevIntegerInSearch")
- x = solver.IntVar(0, 10, "x")
- rev = pywrapcp.RevInteger(12)
- phase = MyDecisionBuilderWithRev(x, 5, rev)
- solver.NewSearch(phase)
- solver.NextSolution()
- self.assertTrue(x.Bound())
- self.assertEqual(5, rev.Value())
- solver.NextSolution()
- self.assertFalse(x.Bound())
- self.assertEqual(12, rev.Value())
- solver.EndSearch()
-
- def testDecisionBuilderThatFails(self):
- solver = pywrapcp.Solver("testRevIntegerInSearch")
- phase = MyDecisionBuilderThatFailsWithRev()
- self.assertFalse(solver.Solve(phase))
-
- # ----------------helper for binpacking posting----------------
-
- def bin_packing_helper(self, cp, binvars, weights, loadvars):
- nbins = len(loadvars)
- nitems = len(binvars)
- for j in range(nbins):
- b = [cp.BoolVar(str(i)) for i in range(nitems)]
- for i in range(nitems):
- cp.Add(cp.IsEqualCstCt(binvars[i], j, b[i]))
- cp.Add(
- cp.Sum([b[i] * weights[i] for i in range(nitems)]) == loadvars[j]
- )
- cp.Add(cp.Sum(loadvars) == sum(weights))
-
- def testNoNewSearch(self):
- maxcapa = 44
- weights = [4, 22, 9, 5, 8, 3, 3, 4, 7, 7, 3]
- loss = [
- 0,
- 11,
- 10,
- 9,
- 8,
- 7,
- 6,
- 5,
- 4,
- 3,
- 2,
- 1,
- 0,
- 1,
- 0,
- 2,
- 1,
- 0,
- 0,
- 0,
- 0,
- 2,
- 1,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 0,
- 1,
- 0,
- 2,
- 1,
- 0,
- 3,
- 2,
- 1,
- 0,
- 2,
- 1,
- 0,
- 0,
- 0,
- ]
- nbslab = 11
-
- # ------------------solver and variable declaration-------------
-
- solver = pywrapcp.Solver("Steel Mill Slab")
- x = [solver.IntVar(list(range(nbslab)), "x" + str(i)) for i in range(nbslab)]
- l = [solver.IntVar(list(range(maxcapa)), "l" + str(i)) for i in range(nbslab)]
- obj = solver.IntVar(list(range(nbslab * maxcapa)), "obj")
-
- # -------------------post of the constraints--------------
-
- self.bin_packing_helper(solver, x, weights[:nbslab], l)
- solver.Add(solver.Sum([l[s].IndexOf(loss) for s in range(nbslab)]) == obj)
-
- unused_sol = [2, 0, 0, 0, 0, 1, 2, 2, 1, 1, 2]
-
- # ------------start the search and optimization-----------
-
- unused_objective = solver.Minimize(obj, 1)
- unused_db = solver.Phase(x, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
- # solver.NewSearch(db,[objective]) #segfault if NewSearch is not called.
-
- while solver.NextSolution():
- print(obj, "check:", sum([loss[l[s].Min()] for s in range(nbslab)]))
- print(l)
- solver.EndSearch()
+ NUMBER_OF_VARIABLES = 10
+ VARIABLE_MIN = 0
+ VARIABLE_MAX = 10
+ LNS_NEIGHBORS = 100
+ LNS_VARIABLES = 4
+ DECISION_BUILDER_VALUE = 5
+ OTHER_DECISION_BUILDER_VALUE = 2
+
+ def testNewClassAsDecisionBuilder(self):
+ solver = pywrapcp.Solver("testNewClassAsDecisionBuilder")
+ x = solver.IntVar(self.VARIABLE_MIN, self.VARIABLE_MAX, "x")
+ phase = MyDecisionBuilder(x, self.DECISION_BUILDER_VALUE)
+ solver.NewSearch(phase)
+ solver.NextSolution()
+ self.assertTrue(x.Bound())
+ self.assertEqual(self.DECISION_BUILDER_VALUE, x.Min())
+ solver.EndSearch()
+
+ def testComposeTwoDecisions(self):
+ solver = pywrapcp.Solver("testNewClassAsDecisionBuilder")
+ x = solver.IntVar(0, 10, "x")
+ y = solver.IntVar(0, 10, "y")
+ phase_x = MyDecisionBuilder(x, self.DECISION_BUILDER_VALUE)
+ phase_y = MyDecisionBuilder(y, self.OTHER_DECISION_BUILDER_VALUE)
+ phase = solver.Compose([phase_x, phase_y])
+ solver.NewSearch(phase)
+ solver.NextSolution()
+ self.assertTrue(x.Bound())
+ self.assertEqual(self.DECISION_BUILDER_VALUE, x.Min())
+ self.assertTrue(y.Bound())
+ self.assertEqual(self.OTHER_DECISION_BUILDER_VALUE, y.Min())
+ solver.EndSearch()
+
+ def testRandomLns(self):
+ solver = pywrapcp.Solver("testRandomLnsOperator")
+ x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
+ lns = solver.RandomLnsOperator(x, self.LNS_VARIABLES)
+ delta = solver.Assignment()
+ for _ in range(self.LNS_NEIGHBORS):
+ delta.Clear()
+ self.assertTrue(lns.NextNeighbor(delta, delta))
+ self.assertLess(0, delta.Size())
+ self.assertGreater(self.LNS_VARIABLES + 1, delta.Size())
+
+ def testCallbackLns(self):
+ solver = pywrapcp.Solver("testCallbackLNS")
+ x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
+ lns = MyLns(x)
+ solution = solver.Assignment()
+ solution.Add(x)
+ for v in x:
+ solution.SetValue(v, 1)
+ obj_var = solver.Sum(x)
+ objective = solver.Minimize(obj_var, 1)
+ collector = solver.LastSolutionCollector(solution)
+ inner_db = solver.Phase(
+ x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+
+ ls_params = solver.LocalSearchPhaseParameters(obj_var.Var(), lns, inner_db)
+ ls = solver.LocalSearchPhase(x, inner_db, ls_params)
+ log = solver.SearchLog(1000, objective)
+ solver.Solve(ls, [collector, objective, log])
+ for v in x:
+ self.assertEqual(0, collector.Solution(0).Value(v))
+
+ def testCallbackLnsNoValues(self):
+ solver = pywrapcp.Solver("testCallbackLnsNoValues")
+ x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
+ lns = MyLnsNoValues(x)
+ solution = solver.Assignment()
+ solution.Add(x)
+ for v in x:
+ solution.SetValue(v, 1)
+ obj_var = solver.Sum(x)
+ objective = solver.Minimize(obj_var, 1)
+ collector = solver.LastSolutionCollector(solution)
+ inner_db = solver.Phase(
+ x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+
+ ls_params = solver.LocalSearchPhaseParameters(obj_var.Var(), lns, inner_db)
+ db = solver.LocalSearchPhase(x, inner_db, ls_params)
+ log = solver.SearchLog(1000, objective)
+ solver.Solve(db, [collector, objective, log])
+ for v in x:
+ self.assertEqual(0, collector.Solution(0).Value(v))
+
+ def testConcatenateOperators(self):
+ solver = pywrapcp.Solver("testConcatenateOperators")
+ x = [solver.BoolVar("x_%d" % i) for i in range(self.NUMBER_OF_VARIABLES)]
+ op1 = solver.Operator(x, solver.INCREMENT)
+ op2 = solver.Operator(x, solver.DECREMENT)
+ concatenate = solver.ConcatenateOperators([op1, op2])
+ solution = solver.Assignment()
+ solution.Add(x)
+ for v in x:
+ solution.SetValue(v, 1)
+ obj_var = solver.Sum(x)
+ objective = solver.Minimize(obj_var, 1)
+ collector = solver.LastSolutionCollector(solution)
+ ls_params = solver.LocalSearchPhaseParameters(
+ obj_var.Var(), concatenate, None
+ )
+ db = solver.LocalSearchPhase(solution, ls_params)
+ solver.Solve(db, [objective, collector])
+ for v in x:
+ self.assertEqual(0, collector.Solution(0).Value(v))
+
+ def testRevIntegerOutsideSearch(self):
+ solver = pywrapcp.Solver("testRevValue")
+ revx = pywrapcp.RevInteger(12)
+ self.assertEqual(12, revx.Value())
+ revx.SetValue(solver, 25)
+ self.assertEqual(25, revx.Value())
+
+ def testMemberApi(self):
+ solver = pywrapcp.Solver("testMemberApi")
+ x = solver.IntVar(0, 10, "x")
+ c1 = solver.MemberCt(x, [2, 5])
+ c2 = x.Member([2, 5])
+ self.assertEqual(str(c1), str(c2))
+ c3 = solver.NotMemberCt(x, [2, 7], [4, 9])
+ c4 = x.NotMember([2, 7], [4, 9])
+ self.assertEqual(str(c3), str(c4))
+
+ def testRevIntegerInSearch(self):
+ solver = pywrapcp.Solver("testRevIntegerInSearch")
+ x = solver.IntVar(0, 10, "x")
+ rev = pywrapcp.RevInteger(12)
+ phase = MyDecisionBuilderWithRev(x, 5, rev)
+ solver.NewSearch(phase)
+ solver.NextSolution()
+ self.assertTrue(x.Bound())
+ self.assertEqual(5, rev.Value())
+ solver.NextSolution()
+ self.assertFalse(x.Bound())
+ self.assertEqual(12, rev.Value())
+ solver.EndSearch()
+
+ def testDecisionBuilderThatFails(self):
+ solver = pywrapcp.Solver("testRevIntegerInSearch")
+ phase = MyDecisionBuilderThatFailsWithRev()
+ self.assertFalse(solver.Solve(phase))
+
+ # ----------------helper for binpacking posting----------------
+
+ def bin_packing_helper(self, cp, binvars, weights, loadvars):
+ nbins = len(loadvars)
+ nitems = len(binvars)
+ for j in range(nbins):
+ b = [cp.BoolVar(str(i)) for i in range(nitems)]
+ for i in range(nitems):
+ cp.Add(cp.IsEqualCstCt(binvars[i], j, b[i]))
+ cp.Add(
+ cp.Sum([b[i] * weights[i] for i in range(nitems)]) == loadvars[j]
+ )
+ cp.Add(cp.Sum(loadvars) == sum(weights))
+
+ def testNoNewSearch(self):
+ maxcapa = 44
+ weights = [4, 22, 9, 5, 8, 3, 3, 4, 7, 7, 3]
+ loss = [
+ 0,
+ 11,
+ 10,
+ 9,
+ 8,
+ 7,
+ 6,
+ 5,
+ 4,
+ 3,
+ 2,
+ 1,
+ 0,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 3,
+ 2,
+ 1,
+ 0,
+ 2,
+ 1,
+ 0,
+ 0,
+ 0,
+ ]
+ nbslab = 11
+
+ # ------------------solver and variable declaration-------------
+
+ solver = pywrapcp.Solver("Steel Mill Slab")
+ x = [
+ solver.IntVar(list(range(nbslab)), "x" + str(i)) for i in range(nbslab)
+ ]
+ l = [
+ solver.IntVar(list(range(maxcapa)), "l" + str(i)) for i in range(nbslab)
+ ]
+ obj = solver.IntVar(list(range(nbslab * maxcapa)), "obj")
+
+ # -------------------post of the constraints--------------
+
+ self.bin_packing_helper(solver, x, weights[:nbslab], l)
+ solver.Add(solver.Sum([l[s].IndexOf(loss) for s in range(nbslab)]) == obj)
+
+ unused_sol = [2, 0, 0, 0, 0, 1, 2, 2, 1, 1, 2]
+
+ # ------------start the search and optimization-----------
+
+ unused_objective = solver.Minimize(obj, 1)
+ unused_db = solver.Phase(
+ x, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT
+ )
+ # solver.NewSearch(db,[objective]) #segfault if NewSearch is not called.
+
+ while solver.NextSolution():
+ print(obj, "check:", sum([loss[l[s].Min()] for s in range(nbslab)]))
+ print(l)
+ solver.EndSearch()
class SplitDomainDecisionBuilder(pywrapcp.PyDecisionBuilder):
- def __init__(self, var, value, lower):
- super().__init__()
- self.__var = var
- self.__value = value
- self.__lower = lower
- self.__done = pywrapcp.RevBool(False)
+ def __init__(self, var, value, lower):
+ super().__init__()
+ self.__var = var
+ self.__value = value
+ self.__lower = lower
+ self.__done = pywrapcp.RevBool(False)
- def Next(self, solver):
- if self.__done.Value():
- return None
- self.__done.SetValue(solver, True)
- return solver.SplitVariableDomain(self.__var, self.__value, self.__lower)
+ def Next(self, solver):
+ if self.__done.Value():
+ return None
+ self.__done.SetValue(solver, True)
+ return solver.SplitVariableDomain(self.__var, self.__value, self.__lower)
class PyWrapCPDecisionTest(absltest.TestCase):
- def testSplitDomainLower(self):
- solver = pywrapcp.Solver("testSplitDomainLower")
- x = solver.IntVar(0, 10, "x")
- phase = SplitDomainDecisionBuilder(x, 3, True)
- solver.NewSearch(phase)
- self.assertTrue(solver.NextSolution())
- self.assertEqual(0, x.Min())
- self.assertEqual(3, x.Max())
- self.assertTrue(solver.NextSolution())
- self.assertEqual(4, x.Min())
- self.assertEqual(10, x.Max())
- self.assertFalse(solver.NextSolution())
- solver.EndSearch()
-
- def testSplitDomainUpper(self):
- solver = pywrapcp.Solver("testSplitDomainUpper")
- x = solver.IntVar(0, 10, "x")
- phase = SplitDomainDecisionBuilder(x, 6, False)
- solver.NewSearch(phase)
- self.assertTrue(solver.NextSolution())
- self.assertEqual(7, x.Min())
- self.assertEqual(10, x.Max())
- self.assertTrue(solver.NextSolution())
- self.assertEqual(0, x.Min())
- self.assertEqual(6, x.Max())
- self.assertFalse(solver.NextSolution())
- solver.EndSearch()
-
- def testTrueConstraint(self):
- solver = pywrapcp.Solver("test")
- x1 = solver.IntVar(4, 8, "x1")
- x2 = solver.IntVar(3, 7, "x2")
- x3 = solver.IntVar(1, 5, "x3")
- solver.Add((x1 >= 3) + (x2 >= 6) + (x3 <= 3) == 3)
- db = solver.Phase(
- [x1, x2, x3], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
- )
- solver.NewSearch(db)
- solver.NextSolution()
- solver.EndSearch()
-
- def testFalseConstraint(self):
- solver = pywrapcp.Solver("test")
- x1 = solver.IntVar(4, 8, "x1")
- x2 = solver.IntVar(3, 7, "x2")
- x3 = solver.IntVar(1, 5, "x3")
- solver.Add((x1 <= 3) + (x2 >= 6) + (x3 <= 3) == 3)
- db = solver.Phase(
- [x1, x2, x3], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
- )
- solver.NewSearch(db)
- solver.NextSolution()
- solver.EndSearch()
+ def testSplitDomainLower(self):
+ solver = pywrapcp.Solver("testSplitDomainLower")
+ x = solver.IntVar(0, 10, "x")
+ phase = SplitDomainDecisionBuilder(x, 3, True)
+ solver.NewSearch(phase)
+ self.assertTrue(solver.NextSolution())
+ self.assertEqual(0, x.Min())
+ self.assertEqual(3, x.Max())
+ self.assertTrue(solver.NextSolution())
+ self.assertEqual(4, x.Min())
+ self.assertEqual(10, x.Max())
+ self.assertFalse(solver.NextSolution())
+ solver.EndSearch()
+
+ def testSplitDomainUpper(self):
+ solver = pywrapcp.Solver("testSplitDomainUpper")
+ x = solver.IntVar(0, 10, "x")
+ phase = SplitDomainDecisionBuilder(x, 6, False)
+ solver.NewSearch(phase)
+ self.assertTrue(solver.NextSolution())
+ self.assertEqual(7, x.Min())
+ self.assertEqual(10, x.Max())
+ self.assertTrue(solver.NextSolution())
+ self.assertEqual(0, x.Min())
+ self.assertEqual(6, x.Max())
+ self.assertFalse(solver.NextSolution())
+ solver.EndSearch()
+
+ def testTrueConstraint(self):
+ solver = pywrapcp.Solver("test")
+ x1 = solver.IntVar(4, 8, "x1")
+ x2 = solver.IntVar(3, 7, "x2")
+ x3 = solver.IntVar(1, 5, "x3")
+ solver.Add((x1 >= 3) + (x2 >= 6) + (x3 <= 3) == 3)
+ db = solver.Phase(
+ [x1, x2, x3], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ solver.NewSearch(db)
+ solver.NextSolution()
+ solver.EndSearch()
+
+ def testFalseConstraint(self):
+ solver = pywrapcp.Solver("test")
+ x1 = solver.IntVar(4, 8, "x1")
+ x2 = solver.IntVar(3, 7, "x2")
+ x3 = solver.IntVar(1, 5, "x3")
+ solver.Add((x1 <= 3) + (x2 >= 6) + (x3 <= 3) == 3)
+ db = solver.Phase(
+ [x1, x2, x3], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ solver.NewSearch(db)
+ solver.NextSolution()
+ solver.EndSearch()
class IntVarLocalSearchOperatorTest(absltest.TestCase):
- def test_ctor(self):
- solver = pywrapcp.Solver("Solve")
- int_vars = [solver.IntVar(0, 4) for _ in range(4)]
- ivlso = pywrapcp.IntVarLocalSearchOperator(int_vars)
- self.assertIsNotNone(ivlso)
+ def test_ctor(self):
+ solver = pywrapcp.Solver("Solve")
+ int_vars = [solver.IntVar(0, 4) for _ in range(4)]
+ ivlso = pywrapcp.IntVarLocalSearchOperator(int_vars)
+ self.assertIsNotNone(ivlso)
- def test_api(self):
- # print(f"{dir(pywrapcp.IntVarLocalSearchOperator)}")
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Size"))
+ def test_api(self):
+ # print(f"{dir(pywrapcp.IntVarLocalSearchOperator)}")
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Size"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Var"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "AddVars"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "IsIncremental"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Var"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "AddVars"))
+ self.assertTrue(
+ hasattr(pywrapcp.IntVarLocalSearchOperator, "IsIncremental")
+ )
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Activate"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Deactivate"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Activated"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Activate"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Deactivate"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Activated"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "OldValue"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "PrevValue"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Value"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "SetValue"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "OldValue"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "PrevValue"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Value"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "SetValue"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Start"))
- self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "OnStart"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "Start"))
+ self.assertTrue(hasattr(pywrapcp.IntVarLocalSearchOperator, "OnStart"))
if __name__ == "__main__":
- absltest.main()
+ absltest.main()
diff --git a/ortools/constraint_solver/python/pywraprouting_test.py b/ortools/constraint_solver/python/pywraprouting_test.py
deleted file mode 100755
index 4b0841dc859..00000000000
--- a/ortools/constraint_solver/python/pywraprouting_test.py
+++ /dev/null
@@ -1,896 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Test Routing API."""
-
-import functools
-
-from absl.testing import absltest
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-
-def Distance(node_i, node_j):
- return node_i + node_j
-
-
-def TransitDistance(manager, i, j):
- return Distance(manager.IndexToNode(i), manager.IndexToNode(j))
-
-
-def UnaryTransitDistance(manager, i):
- return Distance(manager.IndexToNode(i), 0)
-
-
-def One(unused_i, unused_j):
- return 1
-
-
-def Two(unused_i, unused_j):
- return 2
-
-
-def Three(unused_i, unused_j):
- return 3
-
-
-class Callback:
-
- def __init__(self, model):
- self.model = model
- self.costs = []
-
- def __call__(self):
- self.costs.append(self.model.CostVar().Max())
-
-
-class TestPyWrapRoutingIndexManager(absltest.TestCase):
-
- def testCtor(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, 7)
- self.assertIsNotNone(manager)
- self.assertEqual(42, manager.GetNumberOfNodes())
- self.assertEqual(3, manager.GetNumberOfVehicles())
- self.assertEqual(42 + 3 * 2 - 1, manager.GetNumberOfIndices())
- for i in range(manager.GetNumberOfVehicles()):
- self.assertEqual(7, manager.IndexToNode(manager.GetStartIndex(i)))
- self.assertEqual(7, manager.IndexToNode(manager.GetEndIndex(i)))
-
- def testCtorMultiDepotSame(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, [0, 0, 0], [0, 0, 0])
- self.assertIsNotNone(manager)
- self.assertEqual(42, manager.GetNumberOfNodes())
- self.assertEqual(3, manager.GetNumberOfVehicles())
- self.assertEqual(42 + 3 * 2 - 1, manager.GetNumberOfIndices())
- for i in range(manager.GetNumberOfVehicles()):
- self.assertEqual(0, manager.IndexToNode(manager.GetStartIndex(i)))
- self.assertEqual(0, manager.IndexToNode(manager.GetEndIndex(i)))
-
- def testCtorMultiDepotAllDiff(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, [1, 2, 3], [4, 5, 6])
- self.assertIsNotNone(manager)
- self.assertEqual(42, manager.GetNumberOfNodes())
- self.assertEqual(3, manager.GetNumberOfVehicles())
- self.assertEqual(42, manager.GetNumberOfIndices())
- for i in range(manager.GetNumberOfVehicles()):
- self.assertEqual(i + 1, manager.IndexToNode(manager.GetStartIndex(i)))
- self.assertEqual(i + 4, manager.IndexToNode(manager.GetEndIndex(i)))
-
-
-class TestPyWrapRoutingModel(absltest.TestCase):
-
- def testCtor(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, 7)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- for i in range(manager.GetNumberOfVehicles()):
- self.assertEqual(7, manager.IndexToNode(model.Start(i)))
- self.assertEqual(7, manager.IndexToNode(model.End(i)))
-
- def testSolve(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, 7)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_OPTIMAL, model.status())
- self.assertIsNotNone(assignment)
- self.assertEqual(0, assignment.ObjectiveValue())
-
- def testSolveMultiDepot(self):
- manager = pywrapcp.RoutingIndexManager(42, 3, [1, 2, 3], [4, 5, 6])
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_OPTIMAL, model.status())
- self.assertIsNotNone(assignment)
- self.assertEqual(0, assignment.ObjectiveValue())
-
- def testTransitCallback(self):
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- self.assertEqual(1, transit_idx)
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertTrue(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(20, assignment.ObjectiveValue())
-
- def testTransitLambda(self):
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_id = model.RegisterTransitCallback(lambda from_index, to_index: 1)
- self.assertEqual(1, transit_id)
- model.SetArcCostEvaluatorOfAllVehicles(transit_id)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertIsNotNone(assignment)
- self.assertEqual(5, assignment.ObjectiveValue())
-
- def testTransitMatrix(self):
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- matrix = [[i + 1 for i in range(5)] for _ in range(5)]
- transit_idx = model.RegisterTransitMatrix(matrix)
- self.assertEqual(1, transit_idx)
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertTrue(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(15, assignment.ObjectiveValue())
-
- def testUnaryTransitCallback(self):
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterUnaryTransitCallback(
- functools.partial(UnaryTransitDistance, manager)
- )
- self.assertEqual(1, transit_idx)
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertTrue(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(10, assignment.ObjectiveValue())
-
- def testUnaryTransitLambda(self):
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_id = model.RegisterUnaryTransitCallback(lambda from_index: 1)
- self.assertEqual(1, transit_id)
- model.SetArcCostEvaluatorOfAllVehicles(transit_id)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertIsNotNone(assignment)
- self.assertEqual(5, assignment.ObjectiveValue())
-
- def testUnaryTransitVector(self):
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- vector = list(range(10))
- transit_idx = model.RegisterUnaryTransitVector(vector)
- self.assertEqual(1, transit_idx)
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.Solve()
- self.assertTrue(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(45, assignment.ObjectiveValue())
-
- def testTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- index = model.Start(0)
- visited_nodes = []
- expected_visited_nodes = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
- while not model.IsEnd(index):
- index = assignment.Value(model.NextVar(index))
- visited_nodes.append(manager.IndexToNode(index))
- self.assertEqual(expected_visited_nodes, visited_nodes)
-
- def testVRP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 2, [0, 1], [1, 0])
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(89, assignment.ObjectiveValue())
- # Inspect solution
- index = model.Start(1)
- visited_nodes = []
- expected_visited_nodes = [2, 4, 6, 8, 3, 5, 7, 9, 0]
- while not model.IsEnd(index):
- index = assignment.Value(model.NextVar(index))
- visited_nodes.append(manager.IndexToNode(index))
- self.assertEqual(expected_visited_nodes, visited_nodes)
- self.assertTrue(model.IsEnd(assignment.Value(model.NextVar(model.Start(0)))))
-
- def testDimensionTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add generic dimension
- model.AddDimension(transit_idx, 90, 90, True, "distance")
- distance_dimension = model.GetDimensionOrDie("distance")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- cumul = 0
- while not model.IsEnd(node):
- self.assertEqual(cumul, assignment.Value(distance_dimension.CumulVar(node)))
- next_node = assignment.Value(model.NextVar(node))
- cumul += Distance(node, next_node)
- node = next_node
-
- def testDimensionWithVehicleCapacitiesTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add generic dimension
- model.AddDimensionWithVehicleCapacity(transit_idx, 90, [90], True, "distance")
- distance_dimension = model.GetDimensionOrDie("distance")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- cumul = 0
- while not model.IsEnd(node):
- self.assertEqual(cumul, assignment.Value(distance_dimension.CumulVar(node)))
- next_node = assignment.Value(model.NextVar(node))
- cumul += Distance(node, next_node)
- node = next_node
-
- def testDimensionWithVehicleTransitsTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add generic dimension
- model.AddDimensionWithVehicleTransits([transit_idx], 90, 90, True, "distance")
- distance_dimension = model.GetDimensionOrDie("distance")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- cumul = 0
- while not model.IsEnd(node):
- self.assertEqual(cumul, assignment.Value(distance_dimension.CumulVar(node)))
- next_node = assignment.Value(model.NextVar(node))
- cumul += Distance(node, next_node)
- node = next_node
-
- def testDimensionWithVehicleTransitsVRP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 3, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add generic dimension
- distances = [
- model.RegisterTransitCallback(One),
- model.RegisterTransitCallback(Two),
- model.RegisterTransitCallback(Three),
- ]
- model.AddDimensionWithVehicleTransits(distances, 90, 90, True, "distance")
- distance_dimension = model.GetDimensionOrDie("distance")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- for vehicle in range(0, model.vehicles()):
- node = model.Start(vehicle)
- cumul = 0
- while not model.IsEnd(node):
- self.assertEqual(
- cumul, assignment.Min(distance_dimension.CumulVar(node))
- )
- next_node = assignment.Value(model.NextVar(node))
- # Increment cumul by the vehicle distance which is equal to the vehicle
- # index + 1, cf. distances.
- cumul += vehicle + 1
- node = next_node
-
- def testConstantDimensionTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 3, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add constant dimension
- constant_id, success = model.AddConstantDimension(1, 100, True, "count")
- self.assertTrue(success)
- self.assertEqual(transit_idx + 1, constant_id)
- count_dimension = model.GetDimensionOrDie("count")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- count = 0
- while not model.IsEnd(node):
- self.assertEqual(count, assignment.Value(count_dimension.CumulVar(node)))
- count += 1
- node = assignment.Value(model.NextVar(node))
- self.assertEqual(10, count)
-
- def testVectorDimensionTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add vector dimension
- values = list(range(10))
- unary_transit_id, success = model.AddVectorDimension(
- values, 100, True, "vector"
- )
- self.assertTrue(success)
- self.assertEqual(transit_idx + 1, unary_transit_id)
- vector_dimension = model.GetDimensionOrDie("vector")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertIsNotNone(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(90, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- cumul = 0
- while not model.IsEnd(node):
- self.assertEqual(cumul, assignment.Value(vector_dimension.CumulVar(node)))
- cumul += values[node]
- node = assignment.Value(model.NextVar(node))
-
- def testMatrixDimensionTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(5, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- cost = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(cost)
- # Add matrix dimension
- values = [[j for _ in range(5)] for j in range(5)]
- transit_id, success = model.AddMatrixDimension(values, 100, True, "matrix")
- self.assertTrue(success)
- self.assertEqual(cost + 1, transit_id)
- dimension = model.GetDimensionOrDie("matrix")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertIsNotNone(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(20, assignment.ObjectiveValue())
- # Inspect solution
- index = model.Start(0)
- cumul = 0
- while not model.IsEnd(index):
- self.assertEqual(cumul, assignment.Value(dimension.CumulVar(index)))
- cumul += values[manager.IndexToNode(index)][manager.IndexToNode(index)]
- index = assignment.Value(model.NextVar(index))
-
- def testMatrixDimensionVRP(self):
- manager = pywrapcp.RoutingIndexManager(5, 2, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- matrix = [[i + j for i in range(5)] for j in range(5)]
- transit_idx = model.RegisterTransitMatrix(matrix)
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add matrix dimension
- matrix_transit_idx, success = model.AddMatrixDimension(
- matrix, 10, True, "matrix" # capacity # fix_start_cumul_to_zero
- )
- self.assertTrue(success)
- self.assertEqual(transit_idx + 1, matrix_transit_idx)
- dimension = model.GetDimensionOrDie("matrix")
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- self.assertEqual(
- routing_enums_pb2.RoutingSearchStatus.ROUTING_NOT_SOLVED, model.status()
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertIsNotNone(assignment)
- self.assertEqual(routing_enums_pb2.RoutingSearchStatus.ROUTING_SUCCESS, model.status())
- self.assertEqual(20, assignment.ObjectiveValue())
- # Inspect solution
- for v in range(manager.GetNumberOfVehicles()):
- index = model.Start(v)
- cumul = 0
- while not model.IsEnd(index):
- self.assertEqual(cumul, assignment.Value(dimension.CumulVar(index)))
- prev_index = index
- index = assignment.Value(model.NextVar(index))
- cumul += matrix[manager.IndexToNode(prev_index)][
- manager.IndexToNode(index)
- ]
-
- def testDisjunctionTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add disjunctions
- disjunctions = [
- [manager.NodeToIndex(1), manager.NodeToIndex(2)],
- [manager.NodeToIndex(3)],
- [manager.NodeToIndex(4)],
- [manager.NodeToIndex(5)],
- [manager.NodeToIndex(6)],
- [manager.NodeToIndex(7)],
- [manager.NodeToIndex(8)],
- [manager.NodeToIndex(9)],
- ]
- for disjunction in disjunctions:
- model.AddDisjunction(disjunction)
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(86, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- count = 0
- while not model.IsEnd(node):
- count += 1
- node = assignment.Value(model.NextVar(node))
- self.assertEqual(9, count)
-
- def testDisjunctionPenaltyTSP(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Add disjunctions
- disjunctions = [
- ([manager.NodeToIndex(1), manager.NodeToIndex(2)], 1000),
- ([manager.NodeToIndex(3)], 1000),
- ([manager.NodeToIndex(4)], 1000),
- ([manager.NodeToIndex(5)], 1000),
- ([manager.NodeToIndex(6)], 1000),
- ([manager.NodeToIndex(7)], 1000),
- ([manager.NodeToIndex(8)], 1000),
- ([manager.NodeToIndex(9)], 0),
- ]
- for disjunction, penalty in disjunctions:
- model.AddDisjunction(disjunction, penalty)
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.FIRST_UNBOUND_MIN_VALUE
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(68, assignment.ObjectiveValue())
- # Inspect solution
- node = model.Start(0)
- count = 0
- while not model.IsEnd(node):
- count += 1
- node = assignment.Value(model.NextVar(node))
- self.assertEqual(8, count)
-
- def testRoutingModelParameters(self):
- # Create routing model with parameters
- parameters = pywrapcp.DefaultRoutingModelParameters()
- parameters.solver_parameters.CopyFrom(pywrapcp.Solver.DefaultSolverParameters())
- parameters.solver_parameters.trace_propagation = True
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager, parameters)
- self.assertIsNotNone(model)
- self.assertEqual(1, model.vehicles())
- self.assertTrue(model.solver().Parameters().trace_propagation)
-
- def testRoutingLocalSearchFiltering(self):
- parameters = pywrapcp.DefaultRoutingModelParameters()
- parameters.solver_parameters.profile_local_search = True
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager, parameters)
- self.assertIsNotNone(model)
- model.Solve()
- profile = model.solver().LocalSearchProfile()
- print(profile)
- self.assertIsInstance(profile, str)
- self.assertTrue(profile) # Verify it's not empty.
-
- def testRoutingSearchParameters(self):
- # Create routing model
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Close with parameters
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.SAVINGS
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.local_search_operators.use_two_opt = pywrapcp.BOOL_FALSE
- search_parameters.solution_limit = 20
- model.CloseModelWithParameters(search_parameters)
- # Solve with parameters
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(
- 11, model.GetNumberOfDecisionsInFirstSolution(search_parameters)
- )
- self.assertEqual(0, model.GetNumberOfRejectsInFirstSolution(search_parameters))
- self.assertEqual(90, assignment.ObjectiveValue())
- assignment = model.SolveFromAssignmentWithParameters(
- assignment, search_parameters
- )
- self.assertEqual(90, assignment.ObjectiveValue())
-
- def testFindErrorInRoutingSearchParameters(self):
- params = pywrapcp.DefaultRoutingSearchParameters()
- params.local_search_operators.use_cross = pywrapcp.BOOL_UNSPECIFIED
- self.assertIn("cross", pywrapcp.FindErrorInRoutingSearchParameters(params))
-
- def testCallback(self):
- manager = pywrapcp.RoutingIndexManager(10, 1, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- callback = Callback(model)
- model.AddAtSolutionCallback(callback)
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- assignment = model.SolveWithParameters(search_parameters)
- self.assertEqual(90, assignment.ObjectiveValue())
- self.assertEqual(len(callback.costs), 1)
- self.assertEqual(90, callback.costs[0])
-
- def testReadAssignment(self):
- manager = pywrapcp.RoutingIndexManager(10, 2, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # TODO(user): porting this segfaults the tests.
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- routes = [
- [
- manager.NodeToIndex(1),
- manager.NodeToIndex(3),
- manager.NodeToIndex(5),
- manager.NodeToIndex(4),
- manager.NodeToIndex(2),
- manager.NodeToIndex(6),
- ],
- [
- manager.NodeToIndex(7),
- manager.NodeToIndex(9),
- manager.NodeToIndex(8),
- ],
- ]
- assignment = model.ReadAssignmentFromRoutes(routes, False)
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.solution_limit = 1
- solution = model.SolveFromAssignmentWithParameters(
- assignment, search_parameters
- )
- self.assertEqual(90, solution.ObjectiveValue())
- for vehicle in range(0, model.vehicles()):
- node = model.Start(vehicle)
- count = 0
- while not model.IsEnd(node):
- node = solution.Value(model.NextVar(node))
- if not model.IsEnd(node):
- self.assertEqual(routes[vehicle][count], manager.IndexToNode(node))
- count += 1
-
- def testAutomaticFirstSolutionStrategy_simple(self):
- manager = pywrapcp.RoutingIndexManager(31, 7, 3)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- self.assertIsNotNone(model.SolveWithParameters(search_parameters))
- self.assertEqual(
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC,
- model.GetAutomaticFirstSolutionStrategy(),
- )
-
- def testAutomaticFirstSolutionStrategy_pd(self):
- manager = pywrapcp.RoutingIndexManager(31, 7, 0)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- # Add cost function
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- model.SetArcCostEvaluatorOfAllVehicles(transit_idx)
- self.assertTrue(model.AddDimension(transit_idx, 0, 1000, True, "distance"))
- dst_dimension = model.GetDimensionOrDie("distance")
- # Add few Pickup and Delivery
- for request in [[2 * i, 2 * i + 1] for i in range(1, 15)]:
- pickup_index = manager.NodeToIndex(request[0])
- delivery_index = manager.NodeToIndex(request[1])
- model.AddPickupAndDelivery(pickup_index, delivery_index)
- model.solver().Add(
- model.VehicleVar(pickup_index) == model.VehicleVar(delivery_index)
- )
- model.solver().Add(
- dst_dimension.CumulVar(pickup_index)
- <= dst_dimension.CumulVar(delivery_index)
- )
- # Solve
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- self.assertIsNotNone(model.SolveWithParameters(search_parameters))
- self.assertEqual(
- routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION,
- model.GetAutomaticFirstSolutionStrategy(),
- )
-
-
-class TestBoundCost(absltest.TestCase):
-
- def testCtor(self):
- bound_cost = pywrapcp.BoundCost()
- self.assertIsNotNone(bound_cost)
- self.assertEqual(0, bound_cost.bound)
- self.assertEqual(0, bound_cost.cost)
-
- bound_cost = pywrapcp.BoundCost(97, 43)
- self.assertIsNotNone(bound_cost)
- self.assertEqual(97, bound_cost.bound)
- self.assertEqual(43, bound_cost.cost)
-
-
-class TestRoutingDimension(absltest.TestCase):
-
- def testCtor(self):
- manager = pywrapcp.RoutingIndexManager(31, 7, 3)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- self.assertTrue(model.AddDimension(transit_idx, 90, 90, True, "distance"))
- model.GetDimensionOrDie("distance")
-
- def testSoftSpanUpperBound(self):
- manager = pywrapcp.RoutingIndexManager(31, 7, 3)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- self.assertTrue(model.AddDimension(transit_idx, 100, 100, True, "distance"))
- dimension = model.GetDimensionOrDie("distance")
-
- bound_cost = pywrapcp.BoundCost(97, 43)
- self.assertIsNotNone(bound_cost)
- self.assertFalse(dimension.HasSoftSpanUpperBounds())
- for v in range(manager.GetNumberOfVehicles()):
- dimension.SetSoftSpanUpperBoundForVehicle(bound_cost, v)
- bc = dimension.GetSoftSpanUpperBoundForVehicle(v)
- self.assertIsNotNone(bc)
- self.assertEqual(97, bc.bound)
- self.assertEqual(43, bc.cost)
- self.assertTrue(dimension.HasSoftSpanUpperBounds())
-
- def testQuadraticCostSoftSpanUpperBound(self):
- manager = pywrapcp.RoutingIndexManager(31, 7, 3)
- self.assertIsNotNone(manager)
- model = pywrapcp.RoutingModel(manager)
- self.assertIsNotNone(model)
- transit_idx = model.RegisterTransitCallback(
- functools.partial(TransitDistance, manager)
- )
- self.assertTrue(model.AddDimension(transit_idx, 100, 100, True, "distance"))
- dimension = model.GetDimensionOrDie("distance")
-
- bound_cost = pywrapcp.BoundCost(97, 43)
- self.assertIsNotNone(bound_cost)
- self.assertFalse(dimension.HasQuadraticCostSoftSpanUpperBounds())
- for v in range(manager.GetNumberOfVehicles()):
- dimension.SetQuadraticCostSoftSpanUpperBoundForVehicle(bound_cost, v)
- bc = dimension.GetQuadraticCostSoftSpanUpperBoundForVehicle(v)
- self.assertIsNotNone(bc)
- self.assertEqual(97, bc.bound)
- self.assertEqual(43, bc.cost)
- self.assertTrue(dimension.HasQuadraticCostSoftSpanUpperBounds())
-
-
-# TODO(user): Add tests for Routing[Cost|Vehicle|Resource]ClassIndex
-
-if __name__ == "__main__":
- absltest.main()
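The deleted tests above all follow the same construction pattern: an index manager, a model, a registered transit callback, default search parameters, then SolveWithParameters. Below is a minimal sketch of that pattern using the pre-split pywrapcp entry points exactly as they appear in the tests; the constant-cost callback and the node/vehicle counts are illustrative assumptions, and after this change the same calls are expected to live in the new routing package.

import functools

from ortools.constraint_solver import pywrapcp
from ortools.constraint_solver import routing_enums_pb2


def constant_distance(manager, from_index, to_index):
    # Hypothetical transit callback: every arc costs 10.
    del manager, from_index, to_index  # Unused in this toy example.
    return 10


manager = pywrapcp.RoutingIndexManager(10, 1, 0)  # 10 nodes, 1 vehicle, depot 0.
model = pywrapcp.RoutingModel(manager)
transit = model.RegisterTransitCallback(functools.partial(constant_distance, manager))
model.SetArcCostEvaluatorOfAllVehicles(transit)

parameters = pywrapcp.DefaultRoutingSearchParameters()
parameters.first_solution_strategy = (
    routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
)
assignment = model.SolveWithParameters(parameters)
if assignment is not None:
    print("objective:", assignment.ObjectiveValue())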
diff --git a/ortools/constraint_solver/routing_breaks.cc b/ortools/constraint_solver/routing_breaks.cc
deleted file mode 100644
index b316dfbc6c9..00000000000
--- a/ortools/constraint_solver/routing_breaks.cc
+++ /dev/null
@@ -1,1089 +0,0 @@
-// Copyright 2010-2025 Google LLC
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include "absl/log/check.h"
-#include "absl/types/span.h"
-#include "ortools/base/logging.h"
-#include "ortools/base/types.h"
-#include "ortools/constraint_solver/constraint_solver.h"
-#include "ortools/constraint_solver/constraint_solveri.h"
-#include "ortools/constraint_solver/routing.h"
-#include "ortools/constraint_solver/routing_filters.h"
-#include "ortools/util/saturated_arithmetic.h"
-#include "ortools/util/scheduling.h"
-#include "ortools/util/sorted_interval_list.h"
-
-namespace operations_research {
-
-bool DisjunctivePropagator::Propagate(Tasks* tasks) {
- DCHECK_LE(tasks->num_chain_tasks, tasks->start_min.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->start_max.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->duration_min.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->duration_max.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->end_min.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->end_max.size());
- DCHECK_EQ(tasks->start_min.size(), tasks->is_preemptible.size());
- // Do forward deductions, then backward deductions.
- // All propagators are followed by Precedences(),
- // except MirrorTasks() after which Precedences() would make no deductions,
- // and DetectablePrecedencesWithChain() which is stronger than Precedences().
- // Precedences() is a propagator that does obvious deductions quickly (O(n)),
- // so interleaving Precedences() speeds up the propagation fixed point.
- if (!Precedences(tasks) || !EdgeFinding(tasks) || !Precedences(tasks) ||
- !DetectablePrecedencesWithChain(tasks)) {
- return false;
- }
- if (!tasks->forbidden_intervals.empty()) {
- if (!ForbiddenIntervals(tasks) || !Precedences(tasks)) return false;
- }
- if (!tasks->distance_duration.empty()) {
- if (!DistanceDuration(tasks) || !Precedences(tasks)) return false;
- }
- if (!MirrorTasks(tasks) || !EdgeFinding(tasks) || !Precedences(tasks) ||
- !DetectablePrecedencesWithChain(tasks) || !MirrorTasks(tasks)) {
- return false;
- }
- return true;
-}
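A rough sketch of the interleaving described in the comment above: run a cheap propagator between stronger ones and iterate until a (bounded) fixed point. The dictionary representation of tasks and the propagator callables are assumptions for illustration, not the library API.

import copy


def propagate(tasks, cheap, strong_propagators, max_rounds=8):
    # tasks: a dict of bound lists; cheap and strong_propagators: callables
    # that tighten bounds in place and return False on infeasibility.
    for _ in range(max_rounds):
        before = copy.deepcopy(tasks)
        for strong in strong_propagators:
            if not cheap(tasks) or not strong(tasks):
                return False
        if tasks == before:  # Fixed point: nothing left to deduce.
            break
    return True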
-
-bool DisjunctivePropagator::Precedences(Tasks* tasks) {
- const int num_chain_tasks = tasks->num_chain_tasks;
- if (num_chain_tasks > 0) {
- // Propagate forwards.
- int64_t time = tasks->start_min[0];
- for (int task = 0; task < num_chain_tasks; ++task) {
- time = std::max(tasks->start_min[task], time);
- tasks->start_min[task] = time;
- time = CapAdd(time, tasks->duration_min[task]);
- if (tasks->end_max[task] < time) return false;
- time = std::max(time, tasks->end_min[task]);
- tasks->end_min[task] = time;
- }
- // Propagate backwards.
- time = tasks->end_max[num_chain_tasks - 1];
- for (int task = num_chain_tasks - 1; task >= 0; --task) {
- time = std::min(tasks->end_max[task], time);
- tasks->end_max[task] = time;
- time = CapSub(time, tasks->duration_min[task]);
- if (time < tasks->start_min[task]) return false;
- time = std::min(time, tasks->start_max[task]);
- tasks->start_max[task] = time;
- }
- }
- const int num_tasks = tasks->start_min.size();
- for (int task = 0; task < num_tasks; ++task) {
- // Enforce start + duration <= end.
- tasks->end_min[task] =
- std::max(tasks->end_min[task],
- CapAdd(tasks->start_min[task], tasks->duration_min[task]));
- tasks->start_max[task] =
- std::min(tasks->start_max[task],
- CapSub(tasks->end_max[task], tasks->duration_min[task]));
- tasks->duration_max[task] =
- std::min(tasks->duration_max[task],
- CapSub(tasks->end_max[task], tasks->start_min[task]));
- if (!tasks->is_preemptible[task]) {
- // Enforce start + duration == end for nonpreemptibles.
- tasks->end_max[task] =
- std::min(tasks->end_max[task],
- CapAdd(tasks->start_max[task], tasks->duration_max[task]));
- tasks->start_min[task] =
- std::max(tasks->start_min[task],
- CapSub(tasks->end_min[task], tasks->duration_max[task]));
- tasks->duration_min[task] =
- std::max(tasks->duration_min[task],
- CapSub(tasks->end_min[task], tasks->start_max[task]));
- }
- if (tasks->duration_min[task] > tasks->duration_max[task]) return false;
- if (tasks->end_min[task] > tasks->end_max[task]) return false;
- if (tasks->start_min[task] > tasks->start_max[task]) return false;
- }
- return true;
-}
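Stripped of saturated arithmetic and of the per-task consistency rules that follow, the chain part of Precedences() is the classic forward/backward pass below, sketched on plain Python ints for tasks 0..n-1 of the chain.

def propagate_chain(start_min, start_max, duration_min, end_min, end_max):
    n = len(start_min)
    # Forward pass: a task cannot start before the previous one has ended.
    time = start_min[0]
    for t in range(n):
        time = max(time, start_min[t])
        start_min[t] = time
        time += duration_min[t]
        if time > end_max[t]:
            return False
        time = max(time, end_min[t])
        end_min[t] = time
    # Backward pass: a task cannot end after the next one has started.
    time = end_max[n - 1]
    for t in reversed(range(n)):
        time = min(time, end_max[t])
        end_max[t] = time
        time -= duration_min[t]
        if time < start_min[t]:
            return False
        time = min(time, start_max[t])
        start_max[t] = time
    return True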
-
-bool DisjunctivePropagator::MirrorTasks(Tasks* tasks) {
- const int num_tasks = tasks->start_min.size();
- // For all tasks, start_min := -end_max and end_max := -start_min.
- for (int task = 0; task < num_tasks; ++task) {
- const int64_t t = -tasks->start_min[task];
- tasks->start_min[task] = -tasks->end_max[task];
- tasks->end_max[task] = t;
- }
- // For all tasks, start_max := -end_min and end_min := -start_max.
- for (int task = 0; task < num_tasks; ++task) {
- const int64_t t = -tasks->start_max[task];
- tasks->start_max[task] = -tasks->end_min[task];
- tasks->end_min[task] = t;
- }
- // In the mirror problem, tasks linked by precedences are in reversed order.
- const int num_chain_tasks = tasks->num_chain_tasks;
- for (const auto it :
- {tasks->start_min.begin(), tasks->start_max.begin(),
- tasks->duration_min.begin(), tasks->duration_max.begin(),
- tasks->end_min.begin(), tasks->end_max.begin()}) {
- std::reverse(it, it + num_chain_tasks);
- std::reverse(it + num_chain_tasks, it + num_tasks);
- }
- std::reverse(tasks->is_preemptible.begin(),
- tasks->is_preemptible.begin() + num_chain_tasks);
- std::reverse(tasks->is_preemptible.begin() + num_chain_tasks,
- tasks->is_preemptible.begin() + num_tasks);
- return true;
-}
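The mirroring trick in a simplified form, ignoring the separate reversal of the chain and non-chain ranges: negate the time axis so that the same forward-looking filters also tighten latest times. The dict-based task representation is an assumption for illustration.

def mirror(tasks):
    # tasks: list of dicts with start_min/start_max/end_min/end_max keys.
    for t in tasks:
        t["start_min"], t["end_max"] = -t["end_max"], -t["start_min"]
        t["start_max"], t["end_min"] = -t["end_min"], -t["start_max"]
    tasks.reverse()  # Precedences run in the opposite order in the mirror.
    return tasks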
-
-bool DisjunctivePropagator::EdgeFinding(Tasks* tasks) {
- const int num_tasks = tasks->start_min.size();
- // Prepare start_min events for tree.
- tasks_by_start_min_.resize(num_tasks);
- std::iota(tasks_by_start_min_.begin(), tasks_by_start_min_.end(), 0);
- std::sort(
- tasks_by_start_min_.begin(), tasks_by_start_min_.end(),
- [&](int i, int j) { return tasks->start_min[i] < tasks->start_min[j]; });
- event_of_task_.resize(num_tasks);
- for (int event = 0; event < num_tasks; ++event) {
- event_of_task_[tasks_by_start_min_[event]] = event;
- }
- // Tasks will be browsed according to end_max order.
- tasks_by_end_max_.resize(num_tasks);
- std::iota(tasks_by_end_max_.begin(), tasks_by_end_max_.end(), 0);
- std::sort(
- tasks_by_end_max_.begin(), tasks_by_end_max_.end(),
- [&](int i, int j) { return tasks->end_max[i] < tasks->end_max[j]; });
-
- // Generic overload checking: insert tasks by end_max,
- // fail if envelope > end_max.
- theta_lambda_tree_.Reset(num_tasks);
- for (const int task : tasks_by_end_max_) {
- theta_lambda_tree_.AddOrUpdateEvent(
- event_of_task_[task], tasks->start_min[task], tasks->duration_min[task],
- tasks->duration_min[task]);
- if (theta_lambda_tree_.GetEnvelope() > tasks->end_max[task]) {
- return false;
- }
- }
-
- // Generic edge finding: from full set of tasks, at each end_max event in
- // decreasing order, check lambda feasibility, then move end_max task from
- // theta to lambda.
- for (int i = num_tasks - 1; i >= 0; --i) {
- const int task = tasks_by_end_max_[i];
- const int64_t envelope = theta_lambda_tree_.GetEnvelope();
- // If a nonpreemptible optional would overload end_max, push to envelope.
- while (theta_lambda_tree_.GetOptionalEnvelope() > tasks->end_max[task]) {
- int critical_event; // Dummy value.
- int optional_event;
- int64_t available_energy; // Dummy value.
- theta_lambda_tree_.GetEventsWithOptionalEnvelopeGreaterThan(
- tasks->end_max[task], &critical_event, &optional_event,
- &available_energy);
- const int optional_task = tasks_by_start_min_[optional_event];
- tasks->start_min[optional_task] =
- std::max(tasks->start_min[optional_task], envelope);
- theta_lambda_tree_.RemoveEvent(optional_event);
- }
- if (!tasks->is_preemptible[task]) {
- theta_lambda_tree_.AddOrUpdateOptionalEvent(event_of_task_[task],
- tasks->start_min[task],
- tasks->duration_min[task]);
- } else {
- theta_lambda_tree_.RemoveEvent(event_of_task_[task]);
- }
- }
- return true;
-}
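The overload-checking part above ("fail if envelope > end_max") can be written naively in O(n^2) without a theta-lambda tree; a sketch of that check only, leaving out the edge-finding adjustments of start_min.

def overload_check(start_min, duration_min, end_max):
    order = sorted(range(len(start_min)), key=lambda t: end_max[t])
    inserted = []
    for t in order:
        inserted.append(t)
        # Envelope of the inserted set: for some task j, all inserted tasks
        # starting at or after start_min[j] must run after start_min[j].
        envelope = max(
            start_min[j]
            + sum(duration_min[k] for k in inserted if start_min[k] >= start_min[j])
            for j in inserted
        )
        if envelope > end_max[t]:
            return False  # More mandatory work than fits before end_max[t].
    return True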
-
-bool DisjunctivePropagator::DetectablePrecedencesWithChain(Tasks* tasks) {
- const int num_tasks = tasks->start_min.size();
- // Prepare start_min events for tree.
- tasks_by_start_min_.resize(num_tasks);
- std::iota(tasks_by_start_min_.begin(), tasks_by_start_min_.end(), 0);
- std::sort(
- tasks_by_start_min_.begin(), tasks_by_start_min_.end(),
- [&](int i, int j) { return tasks->start_min[i] < tasks->start_min[j]; });
- event_of_task_.resize(num_tasks);
- for (int event = 0; event < num_tasks; ++event) {
- event_of_task_[tasks_by_start_min_[event]] = event;
- }
- theta_lambda_tree_.Reset(num_tasks);
-
- // Sort nonchain tasks by start max = end_max - duration_min.
- const int num_chain_tasks = tasks->num_chain_tasks;
- nonchain_tasks_by_start_max_.resize(num_tasks - num_chain_tasks);
- std::iota(nonchain_tasks_by_start_max_.begin(),
- nonchain_tasks_by_start_max_.end(), num_chain_tasks);
- std::sort(nonchain_tasks_by_start_max_.begin(),
- nonchain_tasks_by_start_max_.end(), [&tasks](int i, int j) {
- return tasks->end_max[i] - tasks->duration_min[i] <
- tasks->end_max[j] - tasks->duration_min[j];
- });
-
- // Detectable precedences, specialized for routes: for every task on route,
- // put all tasks before it in the tree, then push with envelope.
- int index_nonchain = 0;
- for (int i = 0; i < num_chain_tasks; ++i) {
- if (!tasks->is_preemptible[i]) {
- // Add all nonchain tasks detected before i.
- while (index_nonchain < nonchain_tasks_by_start_max_.size()) {
- const int task = nonchain_tasks_by_start_max_[index_nonchain];
- if (tasks->end_max[task] - tasks->duration_min[task] >=
- tasks->start_min[i] + tasks->duration_min[i])
- break;
- theta_lambda_tree_.AddOrUpdateEvent(
- event_of_task_[task], tasks->start_min[task],
- tasks->duration_min[task], tasks->duration_min[task]);
- index_nonchain++;
- }
- }
- // All chain and nonchain tasks before i are now in the tree, push i.
- const int64_t new_start_min = theta_lambda_tree_.GetEnvelope();
- // Add i to the tree before updating it.
- theta_lambda_tree_.AddOrUpdateEvent(event_of_task_[i], tasks->start_min[i],
- tasks->duration_min[i],
- tasks->duration_min[i]);
- tasks->start_min[i] = std::max(tasks->start_min[i], new_start_min);
- }
- return true;
-}
-
-bool DisjunctivePropagator::ForbiddenIntervals(Tasks* tasks) {
- if (tasks->forbidden_intervals.empty()) return true;
- const int num_tasks = tasks->start_min.size();
- for (int task = 0; task < num_tasks; ++task) {
- if (tasks->duration_min[task] == 0) continue;
- if (tasks->forbidden_intervals[task] == nullptr) continue;
- // If start_min forbidden, push to next feasible value.
- {
- const auto& interval =
- tasks->forbidden_intervals[task]->FirstIntervalGreaterOrEqual(
- tasks->start_min[task]);
- if (interval == tasks->forbidden_intervals[task]->end()) continue;
- if (interval->start <= tasks->start_min[task]) {
- tasks->start_min[task] = CapAdd(interval->end, 1);
- }
- }
- // If end_max forbidden, push to next feasible value.
- {
- const int64_t start_max =
- CapSub(tasks->end_max[task], tasks->duration_min[task]);
- const auto& interval =
- tasks->forbidden_intervals[task]->LastIntervalLessOrEqual(start_max);
- if (interval == tasks->forbidden_intervals[task]->end()) continue;
- if (interval->end >= start_max) {
- tasks->end_max[task] =
- CapAdd(interval->start, tasks->duration_min[task] - 1);
- }
- }
- if (CapAdd(tasks->start_min[task], tasks->duration_min[task]) >
- tasks->end_max[task]) {
- return false;
- }
- }
- return true;
-}
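The start_min push above, sketched on a plain sorted list of closed intervals instead of a SortedDisjointIntervalList; the end_max push is the mirrored case.

def push_start_min(start_min, forbidden):
    # forbidden: sorted, disjoint, closed intervals [lo, hi].
    for lo, hi in forbidden:
        if lo > start_min:
            break  # All remaining intervals lie after start_min.
        if start_min <= hi:
            return hi + 1  # start_min is forbidden: jump just past the interval.
    return start_min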
-
-bool DisjunctivePropagator::DistanceDuration(Tasks* tasks) {
- if (tasks->distance_duration.empty()) return true;
- if (tasks->num_chain_tasks == 0) return true;
- const int route_start = 0;
- const int route_end = tasks->num_chain_tasks - 1;
- const int num_tasks = tasks->start_min.size();
- for (int i = 0; i < tasks->distance_duration.size(); ++i) {
- const int64_t max_distance = tasks->distance_duration[i].first;
- const int64_t minimum_break_duration = tasks->distance_duration[i].second;
-
- // This is a sweeping algorithm that checks whether the union of intervals
- // defined by breaks and the route start/end is (-infty, +infty).
- // Those intervals are:
- // - route start: (-infty, start_max + distance]
- // - route end: [end_min, +infty)
- // - breaks: [start_min, end_max + distance) if their duration_max
- // is >= min_duration, empty set otherwise.
- // If sweeping finds that a time point can be covered by only one interval,
- // it will force the corresponding break or route start/end to cover this
- // point, which can force a break to be above minimum_break_duration.
-
- // We assume break tasks are ordered, so that
- // start_min(task_n) <= start_min(task_{n+1}) and
- // end_max(task_n) <= end_max(task_{n+1}).
- for (int task = tasks->num_chain_tasks + 1; task < num_tasks; ++task) {
- tasks->start_min[task] =
- std::max(tasks->start_min[task], tasks->start_min[task - 1]);
- }
- for (int task = num_tasks - 2; task >= tasks->num_chain_tasks; --task) {
- tasks->end_max[task] =
- std::min(tasks->end_max[task], tasks->end_max[task + 1]);
- }
- // Skip breaks that cannot be performed after start.
- int index_break_by_emax = tasks->num_chain_tasks;
- while (index_break_by_emax < num_tasks &&
- tasks->end_max[index_break_by_emax] <= tasks->end_min[route_start]) {
- ++index_break_by_emax;
- }
- // Special case: no breaks after start.
- if (index_break_by_emax == num_tasks) {
- tasks->end_min[route_start] =
- std::max(tasks->end_min[route_start],
- CapSub(tasks->start_min[route_end], max_distance));
- tasks->start_max[route_end] =
- std::min(tasks->start_max[route_end],
- CapAdd(tasks->end_max[route_start], max_distance));
- continue;
- }
- // There will be a break after start, so route_start coverage is tested.
- // Initial state: start at -inf with route_start in task_set.
- // Sweep over profile, looking for time points where the number of
- // covering breaks is <= 1. If it is 0, fail, otherwise force the
- // unique break to cover it.
- // Route start and end get a special treatment, not sure generalizing
- // would be better.
- int64_t xor_active_tasks = route_start;
- int num_active_tasks = 1;
- int64_t previous_time = std::numeric_limits<int64_t>::min();
- const int64_t route_start_time =
- CapAdd(tasks->end_max[route_start], max_distance);
- const int64_t route_end_time = tasks->start_min[route_end];
- // NOTE: all smin events must be closed by a corresponding emax event,
- // otherwise num_active_tasks is wrong (too high) and the reasoning misses
- // some filtering.
- int index_break_by_smin = index_break_by_emax;
- while (index_break_by_emax < num_tasks) {
- // Find next time point among start/end of covering intervals.
- int64_t current_time =
- CapAdd(tasks->end_max[index_break_by_emax], max_distance);
- if (index_break_by_smin < num_tasks) {
- current_time =
- std::min(current_time, tasks->start_min[index_break_by_smin]);
- }
- if (previous_time < route_start_time && route_start_time < current_time) {
- current_time = route_start_time;
- }
- if (previous_time < route_end_time && route_end_time < current_time) {
- current_time = route_end_time;
- }
- // If num_active_tasks was 1, the unique active task must cover from
- // previous_time to current_time.
- if (num_active_tasks == 1) {
- // xor_active_tasks is the unique task that can cover [previous_time,
- // current_time).
- if (xor_active_tasks != route_end) {
- tasks->end_min[xor_active_tasks] =
- std::max(tasks->end_min[xor_active_tasks],
- CapSub(current_time, max_distance));
- if (xor_active_tasks != route_start) {
- tasks->duration_min[xor_active_tasks] = std::max(
- tasks->duration_min[xor_active_tasks],
- std::max(
- minimum_break_duration,
- CapSub(CapSub(current_time, max_distance), previous_time)));
- }
- }
- }
- // Process covering intervals that start or end at current_time.
- while (index_break_by_smin < num_tasks &&
- current_time == tasks->start_min[index_break_by_smin]) {
- if (tasks->duration_max[index_break_by_smin] >=
- minimum_break_duration) {
- xor_active_tasks ^= index_break_by_smin;
- ++num_active_tasks;
- }
- ++index_break_by_smin;
- }
- while (index_break_by_emax < num_tasks &&
- current_time ==
- CapAdd(tasks->end_max[index_break_by_emax], max_distance)) {
- if (tasks->duration_max[index_break_by_emax] >=
- minimum_break_duration) {
- xor_active_tasks ^= index_break_by_emax;
- --num_active_tasks;
- }
- ++index_break_by_emax;
- }
- if (current_time == route_start_time) {
- xor_active_tasks ^= route_start;
- --num_active_tasks;
- }
- if (current_time == route_end_time) {
- xor_active_tasks ^= route_end;
- ++num_active_tasks;
- }
- // If num_active_tasks becomes 1, the unique active task must cover from
- // current_time.
- if (num_active_tasks <= 0) return false;
- if (num_active_tasks == 1) {
- if (xor_active_tasks != route_start) {
- // xor_active_tasks is the unique task that can cover from
- // current_time to the next time point.
- tasks->start_max[xor_active_tasks] =
- std::min(tasks->start_max[xor_active_tasks], current_time);
- if (xor_active_tasks != route_end) {
- tasks->duration_min[xor_active_tasks] = std::max(
- tasks->duration_min[xor_active_tasks], minimum_break_duration);
- }
- }
- }
- previous_time = current_time;
- }
- }
- return true;
-}
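For the feasibility part only, the covering argument above reduces to a greedy check that the driver is never more than max_distance away from a usable break or from the route end. A simplified sketch, assuming breaks are already sorted by start_min and treating time as continuous:

def distance_duration_feasible(
    route_start_latest_end, route_end_earliest_start, breaks, max_distance, min_break_duration
):
    # breaks: (start_min, end_max, duration_max) tuples, sorted by start_min.
    covered_until = route_start_latest_end + max_distance
    for start_min, end_max, duration_max in breaks:
        if duration_max < min_break_duration:
            continue  # This break can never count towards the distance rule.
        if start_min > covered_until:
            return False  # A stretch longer than max_distance has no break.
        covered_until = max(covered_until, end_max + max_distance)
    return route_end_earliest_start <= covered_until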
-
-bool DisjunctivePropagator::ChainSpanMin(Tasks* tasks) {
- const int num_chain_tasks = tasks->num_chain_tasks;
- if (num_chain_tasks < 1) return true;
- // TODO(user): add stronger bounds.
- // The duration of the chain plus that of nonchain tasks that must be
- // performed during the chain is a lower bound of the chain span.
- {
- int64_t sum_chain_durations = 0;
- const auto duration_start = tasks->duration_min.begin();
- const auto duration_end = tasks->duration_min.begin() + num_chain_tasks;
- for (auto it = duration_start; it != duration_end; ++it) {
- sum_chain_durations = CapAdd(sum_chain_durations, *it);
- }
- int64_t sum_forced_nonchain_durations = 0;
- for (int i = num_chain_tasks; i < tasks->start_min.size(); ++i) {
- // Tasks that can be executed before or after are skipped.
- if (tasks->end_min[i] <= tasks->start_max[0] ||
- tasks->end_min[num_chain_tasks - 1] <= tasks->start_max[i]) {
- continue;
- }
- sum_forced_nonchain_durations =
- CapAdd(sum_forced_nonchain_durations, tasks->duration_min[i]);
- }
- tasks->span_min =
- std::max(tasks->span_min,
- CapAdd(sum_chain_durations, sum_forced_nonchain_durations));
- }
- // The difference (end of the chain) - (start of the chain) is a lower bound.
- {
- const int64_t end_minus_start =
- CapSub(tasks->end_min[num_chain_tasks - 1], tasks->start_max[0]);
- tasks->span_min = std::max(tasks->span_min, end_minus_start);
- }
-
- return tasks->span_min <= tasks->span_max;
-}
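The two lower bounds combined by ChainSpanMin(), as plain arithmetic; argument names are illustrative.

def chain_span_lower_bound(
    chain_durations, forced_nonchain_durations, chain_end_min, chain_start_max
):
    # 1) all work that must happen between chain start and chain end;
    # 2) earliest possible chain end minus latest possible chain start.
    return max(
        sum(chain_durations) + sum(forced_nonchain_durations),
        chain_end_min - chain_start_max,
    )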
-
-// Computes a lower bound of the span of the chain, taking into account only
-// the first nonchain task.
-// TODO(user): extend to arbitrary number of nonchain tasks.
-bool DisjunctivePropagator::ChainSpanMinDynamic(Tasks* tasks) {
- // Do nothing if there are no chain tasks or no nonchain tasks.
- const int num_chain_tasks = tasks->num_chain_tasks;
- if (num_chain_tasks < 1) return true;
- if (num_chain_tasks == tasks->start_min.size()) return true;
- const int task_index = num_chain_tasks;
- if (!Precedences(tasks)) return false;
- const int64_t min_possible_chain_end = tasks->end_min[num_chain_tasks - 1];
- const int64_t max_possible_chain_start = tasks->start_max[0];
- // For each chain task i, compute cumulated duration of chain tasks before it.
- int64_t total_duration = 0;
- {
- total_duration_before_.resize(num_chain_tasks);
- for (int i = 0; i < num_chain_tasks; ++i) {
- total_duration_before_[i] = total_duration;
- total_duration = CapAdd(total_duration, tasks->duration_min[i]);
- }
- }
- // Estimate span min of chain tasks. Use the schedule that ends at
- // min_possible_chain_end and starts at smallest of start_max[0] or the
- // threshold where pushing start[0] later does not make a difference to the
- // chain span because of chain precedence constraints,
- // i.e. min_possible_chain_end - total_duration.
- {
- const int64_t chain_span_min =
- min_possible_chain_end -
- std::min(tasks->start_max[0], min_possible_chain_end - total_duration);
- if (chain_span_min > tasks->span_max) {
- return false;
- } else {
- tasks->span_min = std::max(tasks->span_min, chain_span_min);
- }
- // If the task can be performed before or after the chain,
- // span_min is chain_span_min.
- if (tasks->end_min[task_index] <= tasks->start_max[0] ||
- tasks->end_min[num_chain_tasks - 1] <= tasks->start_max[task_index]) {
- return true;
- }
- }
- // Scan all possible preemption positions of the nonchain task,
- // keep the one that yields the minimum span.
- int64_t span_min = std::numeric_limits<int64_t>::max();
- bool schedule_is_feasible = false;
- for (int i = 0; i < num_chain_tasks; ++i) {
- if (!tasks->is_preemptible[i]) continue;
- // Estimate the span min if the nonchain task is performed during i.
- // For all possible minimal-span schedules, there is a schedule where task i
- // and the nonchain task form a single block. Thus, we only consider those.
- const int64_t block_start_min =
- std::max(tasks->start_min[i],
- tasks->start_min[task_index] - tasks->duration_min[i]);
- const int64_t block_start_max =
- std::min(tasks->start_max[task_index],
- tasks->start_max[i] - tasks->duration_min[task_index]);
- if (block_start_min > block_start_max) continue;
-
- // Compute the block start that yields the minimal span.
- // Given a feasible block start, a chain of minimum span constrained to
- // this particular block start can be obtained by scheduling all tasks after
- // the block at their earliest, and all tasks before it at their latest.
- // The span can be decomposed into two parts: the head, which are the
- // tasks that are before the block, and the tail, which are the block and
- // the tasks after it.
- // When the block start varies, the head length of the optimal schedule
- // described above decreases as much as the block start decreases, until
- // an inflection point at which it stays constant. That inflection value
- // is the one where the precedence constraints force the chain start to
- // decrease because of durations.
- const int64_t head_inflection =
- max_possible_chain_start + total_duration_before_[i];
- // The map from block start to minimal tail length also has an inflection
- // point, that additionally depends on the nonchain task's duration.
- const int64_t tail_inflection =
- min_possible_chain_end - (total_duration - total_duration_before_[i]) -
- tasks->duration_min[task_index];
- // All block start values between these two yield the same minimal span.
- // Indeed, note first that the inflection points might be in either order.
- // - if head_inflection < tail_inflection, then inside the interval
- // [head_inflection, tail_inflection], increasing the block start by delta
- // decreases the tail length by delta and increases the head length by
- // delta too.
- // - if tail_inflection < head_inflection, then inside the interval
- // [tail_inflection, head_inflection], head length is constantly at
- // total_duration_before_[i], and tail length is also constant.
- // In both cases, outside of the interval, one part is constant and the
- // other increases as much as the distance to the interval.
- // We can abstract the inflection points to the interval they form.
- const int64_t optimal_interval_min_start =
- std::min(head_inflection, tail_inflection);
- const int64_t optimal_interval_max_start =
- std::max(head_inflection, tail_inflection);
- // If the optimal interval for block start intersects the feasible interval,
- // we can select any point within it, for instance the earliest one.
- int64_t block_start = std::max(optimal_interval_min_start, block_start_min);
- // If the intervals do not intersect, the feasible value closest to the
- // optimal interval has the minimal span, because the span increases as
- // much as the distance to the optimal interval.
- if (optimal_interval_max_start < block_start_min) {
- // Optimal interval is before feasible interval, closest is feasible min.
- block_start = block_start_min;
- } else if (block_start_max < optimal_interval_min_start) {
- // Optimal interval is after feasible interval, closest is feasible max.
- block_start = block_start_max;
- }
- // Compute span for the chosen block start.
- const int64_t head_duration =
- std::max(block_start, head_inflection) - max_possible_chain_start;
- const int64_t tail_duration =
- min_possible_chain_end - std::min(block_start, tail_inflection);
- const int64_t optimal_span_at_i = head_duration + tail_duration;
- span_min = std::min(span_min, optimal_span_at_i);
- schedule_is_feasible = true;
- }
- if (!schedule_is_feasible || span_min > tasks->span_max) {
- return false;
- } else {
- tasks->span_min = std::max(tasks->span_min, span_min);
- return true;
- }
-}
-
-void AppendTasksFromPath(absl::Span<const int64_t> path,
- const TravelBounds& travel_bounds,
- const RoutingDimension& dimension,
- DisjunctivePropagator::Tasks* tasks) {
- const int num_nodes = path.size();
- DCHECK_EQ(travel_bounds.pre_travels.size(), num_nodes - 1);
- DCHECK_EQ(travel_bounds.post_travels.size(), num_nodes - 1);
- for (int i = 0; i < num_nodes; ++i) {
- const int64_t cumul_min = dimension.CumulVar(path[i])->Min();
- const int64_t cumul_max = dimension.CumulVar(path[i])->Max();
- // Add task associated to visit i.
- // Visits start at Cumul(path[i]) - before_visit
- // and end at Cumul(path[i]) + after_visit
- {
- const int64_t before_visit =
- (i == 0) ? 0 : travel_bounds.post_travels[i - 1];
- const int64_t after_visit =
- (i == num_nodes - 1) ? 0 : travel_bounds.pre_travels[i];
-
- tasks->start_min.push_back(CapSub(cumul_min, before_visit));
- tasks->start_max.push_back(CapSub(cumul_max, before_visit));
- tasks->duration_min.push_back(CapAdd(before_visit, after_visit));
- tasks->duration_max.push_back(CapAdd(before_visit, after_visit));
- tasks->end_min.push_back(CapAdd(cumul_min, after_visit));
- tasks->end_max.push_back(CapAdd(cumul_max, after_visit));
- tasks->is_preemptible.push_back(false);
- }
- if (i == num_nodes - 1) break;
-
- // Tasks from travels.
- // A travel task starts at Cumul(path[i]) + pre_travel,
- // last for FixedTransitVar(path[i]) - pre_travel - post_travel,
- // and must end at the latest at Cumul(path[i+1]) - post_travel.
- {
- const int64_t pre_travel = travel_bounds.pre_travels[i];
- const int64_t post_travel = travel_bounds.post_travels[i];
- tasks->start_min.push_back(CapAdd(cumul_min, pre_travel));
- tasks->start_max.push_back(CapAdd(cumul_max, pre_travel));
- tasks->duration_min.push_back(
- std::max<int64_t>(0, CapSub(travel_bounds.min_travels[i],
- CapAdd(pre_travel, post_travel))));
- tasks->duration_max.push_back(
- travel_bounds.max_travels[i] == std::numeric_limits<int64_t>::max()
- ? std::numeric_limits<int64_t>::max()
- : std::max<int64_t>(0, CapSub(travel_bounds.max_travels[i],
- CapAdd(pre_travel, post_travel))));
- tasks->end_min.push_back(
- CapSub(dimension.CumulVar(path[i + 1])->Min(), post_travel));
- tasks->end_max.push_back(
- CapSub(dimension.CumulVar(path[i + 1])->Max(), post_travel));
- tasks->is_preemptible.push_back(true);
- }
- }
-}
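The task decomposition built above, sketched without saturated arithmetic: a non-preemptible visit task around each node and a preemptible travel task on each arc, so that breaks may interrupt driving but never a visit. Field and argument names are illustrative.

def visit_and_travel_tasks(cumuls, min_travels, pre_travels, post_travels):
    # cumuls: list of (cumul_min, cumul_max) per node on the path.
    tasks = []
    n = len(cumuls)
    for i, (cmin, cmax) in enumerate(cumuls):
        before = post_travels[i - 1] if i > 0 else 0
        after = pre_travels[i] if i < n - 1 else 0
        tasks.append({  # Visit task around node i.
            "start_min": cmin - before, "start_max": cmax - before,
            "duration_min": before + after,
            "end_min": cmin + after, "end_max": cmax + after,
            "preemptible": False,
        })
        if i == n - 1:
            break
        nmin, nmax = cumuls[i + 1]
        tasks.append({  # Travel task on arc i -> i+1.
            "start_min": cmin + pre_travels[i], "start_max": cmax + pre_travels[i],
            "duration_min": max(0, min_travels[i] - pre_travels[i] - post_travels[i]),
            "end_min": nmin - post_travels[i], "end_max": nmax - post_travels[i],
            "preemptible": True,
        })
    return tasks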
-
-void FillTravelBoundsOfVehicle(int vehicle, absl::Span<const int64_t> path,
- const RoutingDimension& dimension,
- TravelBounds* travel_bounds) {
- // Fill path and min/max/pre/post travel bounds.
- FillPathEvaluation(path, dimension.transit_evaluator(vehicle),
- &travel_bounds->min_travels);
- const int num_travels = travel_bounds->min_travels.size();
- travel_bounds->max_travels.assign(num_travels,
- std::numeric_limits<int64_t>::max());
- {
- const int index = dimension.GetPreTravelEvaluatorOfVehicle(vehicle);
- if (index == -1) {
- travel_bounds->pre_travels.assign(num_travels, 0);
- } else {
- FillPathEvaluation(path, dimension.model()->TransitCallback(index),
- &travel_bounds->pre_travels);
- }
- }
- {
- const int index = dimension.GetPostTravelEvaluatorOfVehicle(vehicle);
- if (index == -1) {
- travel_bounds->post_travels.assign(num_travels, 0);
- } else {
- FillPathEvaluation(path, dimension.model()->TransitCallback(index),
- &travel_bounds->post_travels);
- }
- }
-}
-
-void AppendTasksFromIntervals(const std::vector<IntervalVar*>& intervals,
- DisjunctivePropagator::Tasks* tasks) {
- for (IntervalVar* interval : intervals) {
- if (!interval->MustBePerformed()) continue;
- tasks->start_min.push_back(interval->StartMin());
- tasks->start_max.push_back(interval->StartMax());
- tasks->duration_min.push_back(interval->DurationMin());
- tasks->duration_max.push_back(interval->DurationMax());
- tasks->end_min.push_back(interval->EndMin());
- tasks->end_max.push_back(interval->EndMax());
- tasks->is_preemptible.push_back(false);
- }
-}
-
-GlobalVehicleBreaksConstraint::GlobalVehicleBreaksConstraint(
- const RoutingDimension* dimension)
- : Constraint(dimension->model()->solver()),
- model_(dimension->model()),
- dimension_(dimension) {
- vehicle_demons_.resize(model_->vehicles());
-}
-
-void GlobalVehicleBreaksConstraint::Post() {
- for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) {
- if (dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() &&
- dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) {
- continue;
- }
- vehicle_demons_[vehicle] = MakeDelayedConstraintDemon1(
- solver(), this, &GlobalVehicleBreaksConstraint::PropagateVehicle,
- "PropagateVehicle", vehicle);
- for (IntervalVar* interval :
- dimension_->GetBreakIntervalsOfVehicle(vehicle)) {
- interval->WhenAnything(vehicle_demons_[vehicle]);
- }
- }
- const int num_cumuls = dimension_->cumuls().size();
- const int num_nexts = model_->Nexts().size();
- for (int node = 0; node < num_cumuls; node++) {
- Demon* dimension_demon = MakeConstraintDemon1(
- solver(), this, &GlobalVehicleBreaksConstraint::PropagateNode,
- "PropagateNode", node);
- if (node < num_nexts) {
- model_->NextVar(node)->WhenBound(dimension_demon);
- dimension_->SlackVar(node)->WhenRange(dimension_demon);
- }
- model_->VehicleVar(node)->WhenBound(dimension_demon);
- dimension_->CumulVar(node)->WhenRange(dimension_demon);
- }
-}
-
-void GlobalVehicleBreaksConstraint::InitialPropagate() {
- for (int vehicle = 0; vehicle < model_->vehicles(); vehicle++) {
- if (!dimension_->GetBreakIntervalsOfVehicle(vehicle).empty() ||
- !dimension_->GetBreakDistanceDurationOfVehicle(vehicle).empty()) {
- PropagateVehicle(vehicle);
- }
- }
-}
-
-// This dispatches node events to the right vehicle propagator.
-// It also filters out some uninteresting events, on which the vehicle
-// propagator would not find anything new.
-void GlobalVehicleBreaksConstraint::PropagateNode(int node) {
- if (!model_->VehicleVar(node)->Bound()) return;
- const int vehicle = model_->VehicleVar(node)->Min();
- if (vehicle < 0 || vehicle_demons_[vehicle] == nullptr) return;
- EnqueueDelayedDemon(vehicle_demons_[vehicle]);
-}
-
-void GlobalVehicleBreaksConstraint::FillPartialPathOfVehicle(int vehicle) {
- path_.clear();
- int current = model_->Start(vehicle);
- while (!model_->IsEnd(current)) {
- path_.push_back(current);
- current = model_->NextVar(current)->Bound()
- ? model_->NextVar(current)->Min()
- : model_->End(vehicle);
- }
- path_.push_back(current);
-}
-
-void GlobalVehicleBreaksConstraint::FillPathTravels(
- absl::Span<const int64_t> path) {
- const int num_travels = path.size() - 1;
- travel_bounds_.min_travels.resize(num_travels);
- travel_bounds_.max_travels.resize(num_travels);
- for (int i = 0; i < num_travels; ++i) {
- travel_bounds_.min_travels[i] = dimension_->FixedTransitVar(path[i])->Min();
- travel_bounds_.max_travels[i] = dimension_->FixedTransitVar(path[i])->Max();
- }
-}
-
-// First, perform energy-based reasoning on intervals and cumul variables.
-// Then, perform reasoning on slack variables.
-void GlobalVehicleBreaksConstraint::PropagateVehicle(int vehicle) {
- // Fill path and pre/post travel information.
- FillPartialPathOfVehicle(vehicle);
- const int num_nodes = path_.size();
- FillPathTravels(path_);
- {
- const int index = dimension_->GetPreTravelEvaluatorOfVehicle(vehicle);
- if (index == -1) {
- travel_bounds_.pre_travels.assign(num_nodes - 1, 0);
- } else {
- FillPathEvaluation(path_, model_->TransitCallback(index),
- &travel_bounds_.pre_travels);
- }
- }
- {
- const int index = dimension_->GetPostTravelEvaluatorOfVehicle(vehicle);
- if (index == -1) {
- travel_bounds_.post_travels.assign(num_nodes - 1, 0);
- } else {
- FillPathEvaluation(path_, model_->TransitCallback(index),
- &travel_bounds_.post_travels);
- }
- }
- // The last travel might not be fixed: in that case, relax its information.
- if (!model_->NextVar(path_[num_nodes - 2])->Bound()) {
- travel_bounds_.min_travels.back() = 0;
- travel_bounds_.max_travels.back() = std::numeric_limits<int64_t>::max();
- travel_bounds_.pre_travels.back() = 0;
- travel_bounds_.post_travels.back() = 0;
- }
-
- // Fill tasks from path, break intervals, and break constraints.
- tasks_.Clear();
- AppendTasksFromPath(path_, travel_bounds_, *dimension_, &tasks_);
- tasks_.num_chain_tasks = tasks_.start_min.size();
- AppendTasksFromIntervals(dimension_->GetBreakIntervalsOfVehicle(vehicle),
- &tasks_);
- tasks_.distance_duration =
- dimension_->GetBreakDistanceDurationOfVehicle(vehicle);
-
- // Do the actual reasoning, no need to continue if infeasible.
- if (!disjunctive_propagator_.Propagate(&tasks_)) solver()->Fail();
-
- // Make task translators to help set new bounds of CP variables.
- task_translators_.clear();
- for (int i = 0; i < num_nodes; ++i) {
- const int64_t before_visit =
- (i == 0) ? 0 : travel_bounds_.post_travels[i - 1];
- const int64_t after_visit =
- (i == num_nodes - 1) ? 0 : travel_bounds_.pre_travels[i];
- task_translators_.emplace_back(dimension_->CumulVar(path_[i]), before_visit,
- after_visit);
- if (i == num_nodes - 1) break;
- task_translators_.emplace_back(); // Dummy translator for travel tasks.
- }
- for (IntervalVar* interval :
- dimension_->GetBreakIntervalsOfVehicle(vehicle)) {
- if (!interval->MustBePerformed()) continue;
- task_translators_.emplace_back(interval);
- }
-
- // Push new bounds to CP variables.
- const int num_tasks = tasks_.start_min.size();
- for (int task = 0; task < num_tasks; ++task) {
- task_translators_[task].SetStartMin(tasks_.start_min[task]);
- task_translators_[task].SetStartMax(tasks_.start_max[task]);
- task_translators_[task].SetDurationMin(tasks_.duration_min[task]);
- task_translators_[task].SetEndMin(tasks_.end_min[task]);
- task_translators_[task].SetEndMax(tasks_.end_max[task]);
- }
-
- // Reasoning on slack variables: when intervals must be inside an arc,
- // that arc's slack must be large enough to accommodate them.
- // TODO(user): Make a version more efficient than O(n^2).
- if (dimension_->GetBreakIntervalsOfVehicle(vehicle).empty()) return;
- // If the last arc of the path was not bound, do not change slack.
- const int64_t last_bound_arc =
- num_nodes - 2 - (model_->NextVar(path_[num_nodes - 2])->Bound() ? 0 : 1);
- for (int i = 0; i <= last_bound_arc; ++i) {
- const int64_t arc_start_max =
- CapSub(dimension_->CumulVar(path_[i])->Max(),
- i > 0 ? travel_bounds_.post_travels[i - 1] : 0);
- const int64_t arc_end_min =
- CapAdd(dimension_->CumulVar(path_[i + 1])->Min(),
- i < num_nodes - 2 ? travel_bounds_.pre_travels[i + 1] : 0);
- int64_t total_break_inside_arc = 0;
- for (IntervalVar* interval :
- dimension_->GetBreakIntervalsOfVehicle(vehicle)) {
- if (!interval->MustBePerformed()) continue;
- const int64_t interval_start_max = interval->StartMax();
- const int64_t interval_end_min = interval->EndMin();
- const int64_t interval_duration_min = interval->DurationMin();
- // If the interval cannot end before the arc's 'from' node and
- // cannot start after its 'to' node, then it must be inside the arc.
- if (arc_start_max < interval_end_min &&
- interval_start_max < arc_end_min) {
- total_break_inside_arc += interval_duration_min;
- }
- }
- dimension_->SlackVar(path_[i])->SetMin(total_break_inside_arc);
- }
- // Reasoning on optional intervals.
- // TODO(user): merge this with energy-based reasoning.
- // If there is no optional interval, skip the rest of this function.
- {
- bool has_optional = false;
- for (const IntervalVar* interval :
- dimension_->GetBreakIntervalsOfVehicle(vehicle)) {
- if (interval->MayBePerformed() && !interval->MustBePerformed()) {
- has_optional = true;
- break;
- }
- }
- if (!has_optional) return;
- }
- const std::vector<IntervalVar*>& break_intervals =
- dimension_->GetBreakIntervalsOfVehicle(vehicle);
- for (int pos = 0; pos < num_nodes - 1; ++pos) {
- const int64_t current_slack_max = dimension_->SlackVar(path_[pos])->Max();
- const int64_t visit_start_offset =
- pos > 0 ? travel_bounds_.post_travels[pos - 1] : 0;
- const int64_t visit_start_max =
- CapSub(dimension_->CumulVar(path_[pos])->Max(), visit_start_offset);
- const int64_t visit_end_offset =
- (pos < num_nodes - 1) ? travel_bounds_.pre_travels[pos] : 0;
- const int64_t visit_end_min =
- CapAdd(dimension_->CumulVar(path_[pos])->Min(), visit_end_offset);
-
- for (IntervalVar* interval : break_intervals) {
- if (!interval->MayBePerformed()) continue;
- const bool interval_is_performed = interval->MustBePerformed();
- const int64_t interval_start_max = interval->StartMax();
- const int64_t interval_end_min = interval->EndMin();
- const int64_t interval_duration_min = interval->DurationMin();
- // When interval cannot fit inside current arc,
- // do disjunctive reasoning on full arc.
- if (pos < num_nodes - 1 && interval_duration_min > current_slack_max) {
- // The arc lasts from CumulVar(path_[pos]) - post_travel_[pos] to
- // CumulVar(path_[pos+1]) + pre_travel_[pos+1].
- const int64_t arc_start_offset =
- pos > 0 ? travel_bounds_.post_travels[pos - 1] : 0;
- const int64_t arc_start_max = visit_start_max;
- const int64_t arc_end_offset =
- (pos < num_nodes - 2) ? travel_bounds_.pre_travels[pos + 1] : 0;
- const int64_t arc_end_min =
- CapAdd(dimension_->CumulVar(path_[pos + 1])->Min(), arc_end_offset);
- // Interval not before.
- if (arc_start_max < interval_end_min) {
- interval->SetStartMin(arc_end_min);
- if (interval_is_performed) {
- dimension_->CumulVar(path_[pos + 1])
- ->SetMax(CapSub(interval_start_max, arc_end_offset));
- }
- }
- // Interval not after.
- if (interval_start_max < arc_end_min) {
- interval->SetEndMax(arc_start_max);
- if (interval_is_performed) {
- dimension_->CumulVar(path_[pos])
- ->SetMin(CapAdd(interval_end_min, arc_start_offset));
- }
- }
- continue;
- }
- // Interval could fit inside arc: do disjunctive reasoning between
- // interval and visit.
- // Interval not before.
- if (visit_start_max < interval_end_min) {
- interval->SetStartMin(visit_end_min);
- if (interval_is_performed) {
- dimension_->CumulVar(path_[pos])
- ->SetMax(CapSub(interval_start_max, visit_end_offset));
- }
- }
- // Interval not after.
- if (interval_start_max < visit_end_min) {
- interval->SetEndMax(visit_start_max);
- if (interval_is_performed) {
- dimension_->CumulVar(path_[pos])
- ->SetMin(CapAdd(interval_end_min, visit_start_offset));
- }
- }
- }
- }
-}
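The slack reasoning described above boils down to: a mandatory break that can neither end before an arc starts nor start after it ends must be taken inside that arc, so the arc's slack must absorb its duration. A sketch with illustrative argument names:

def min_slack_for_arc(arc_start_max, arc_end_min, mandatory_breaks):
    # mandatory_breaks: (start_max, end_min, duration_min) of performed breaks.
    return sum(
        duration_min
        for start_max, end_min, duration_min in mandatory_breaks
        if arc_start_max < end_min and start_max < arc_end_min
    )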
-
-namespace {
-class VehicleBreaksFilter : public BasePathFilter {
- public:
- VehicleBreaksFilter(const RoutingModel& routing_model,
- const RoutingDimension& dimension);
- std::string DebugString() const override { return "VehicleBreaksFilter"; }
- bool AcceptPath(int64_t path_start, int64_t chain_start,
- int64_t chain_end) override;
-
- private:
- // Fills path_ with the path of vehicle, start to end.
- void FillPathOfVehicle(int64_t vehicle);
- std::vector<int64_t> path_;
- // Handles to model.
- const RoutingModel& model_;
- const RoutingDimension& dimension_;
- // Strong energy-based filtering algorithm.
- DisjunctivePropagator disjunctive_propagator_;
- DisjunctivePropagator::Tasks tasks_;
- // Used to check whether propagation changed a vector.
- std::vector<int64_t> old_start_min_;
- std::vector<int64_t> old_start_max_;
- std::vector<int64_t> old_end_min_;
- std::vector<int64_t> old_end_max_;
-
- std::vector<int> start_to_vehicle_;
- TravelBounds travel_bounds_;
-};
-
-VehicleBreaksFilter::VehicleBreaksFilter(const RoutingModel& routing_model,
- const RoutingDimension& dimension)
- : BasePathFilter(routing_model.Nexts(),
- routing_model.Size() + routing_model.vehicles(),
- routing_model.GetPathsMetadata()),
- model_(routing_model),
- dimension_(dimension) {
- DCHECK(dimension_.HasBreakConstraints());
- start_to_vehicle_.resize(Size(), -1);
- for (int i = 0; i < routing_model.vehicles(); ++i) {
- start_to_vehicle_[routing_model.Start(i)] = i;
- }
-}
-
-void VehicleBreaksFilter::FillPathOfVehicle(int64_t vehicle) {
- path_.clear();
- int current = model_.Start(vehicle);
- while (!model_.IsEnd(current)) {
- path_.push_back(current);
- current = GetNext(current);
- }
- path_.push_back(current);
-}
-
-bool VehicleBreaksFilter::AcceptPath(int64_t path_start, int64_t chain_start,
- int64_t chain_end) {
- const int vehicle = start_to_vehicle_[path_start];
- if (dimension_.GetBreakIntervalsOfVehicle(vehicle).empty() &&
- dimension_.GetBreakDistanceDurationOfVehicle(vehicle).empty()) {
- return true;
- }
- // Fill path and pre/post travel information.
- FillPathOfVehicle(vehicle);
- FillTravelBoundsOfVehicle(vehicle, path_, dimension_, &travel_bounds_);
- // Fill tasks from path, forbidden intervals, breaks and break constraints.
- tasks_.Clear();
- AppendTasksFromPath(path_, travel_bounds_, dimension_, &tasks_);
- tasks_.num_chain_tasks = tasks_.start_min.size();
- AppendTasksFromIntervals(dimension_.GetBreakIntervalsOfVehicle(vehicle),
- &tasks_);
- // Add forbidden intervals only if a node has some.
- tasks_.forbidden_intervals.clear();
- if (std::any_of(path_.begin(), path_.end(), [this](int64_t node) {
- return dimension_.forbidden_intervals()[node].NumIntervals() > 0;
- })) {
- tasks_.forbidden_intervals.assign(tasks_.start_min.size(), nullptr);
- for (int i = 0; i < path_.size(); ++i) {
- tasks_.forbidden_intervals[2 * i] =
- &(dimension_.forbidden_intervals()[path_[i]]);
- }
- }
- // Max distance duration constraint.
- tasks_.distance_duration =
- dimension_.GetBreakDistanceDurationOfVehicle(vehicle);
-
- // Reduce bounds until failure or fixed point is reached.
- // We set a maximum amount of iterations to avoid slow propagation.
- bool is_feasible = true;
- int maximum_num_iterations = 8;
- while (--maximum_num_iterations >= 0) {
- old_start_min_ = tasks_.start_min;
- old_start_max_ = tasks_.start_max;
- old_end_min_ = tasks_.end_min;
- old_end_max_ = tasks_.end_max;
- is_feasible = disjunctive_propagator_.Propagate(&tasks_);
- if (!is_feasible) break;
- // If fixed point reached, stop.
- if ((old_start_min_ == tasks_.start_min) &&
- (old_start_max_ == tasks_.start_max) &&
- (old_end_min_ == tasks_.end_min) && (old_end_max_ == tasks_.end_max)) {
- break;
- }
- }
- return is_feasible;
-}
-
-} // namespace
-
-IntVarLocalSearchFilter* MakeVehicleBreaksFilter(
- const RoutingModel& routing_model, const RoutingDimension& dimension) {
- return routing_model.solver()->RevAlloc(
- new VehicleBreaksFilter(routing_model, dimension));
-}
-
-} // namespace operations_research
diff --git a/ortools/constraint_solver/samples/BUILD.bazel b/ortools/constraint_solver/samples/BUILD.bazel
index ccb64d8b73c..6b6fab6fea5 100644
--- a/ortools/constraint_solver/samples/BUILD.bazel
+++ b/ortools/constraint_solver/samples/BUILD.bazel
@@ -24,39 +24,3 @@ code_sample_cc(name = "rabbits_and_pheasants_cp")
code_sample_cc(name = "simple_cp_program")
code_sample_cc(name = "simple_ls_program")
-
-code_sample_cc(name = "simple_routing_program")
-
-code_sample_cc(name = "tsp")
-
-code_sample_cc(name = "tsp_circuit_board")
-
-code_sample_cc(name = "tsp_cities")
-
-code_sample_cc(name = "tsp_distance_matrix")
-
-code_sample_cc(name = "vrp")
-
-code_sample_cc(name = "vrp_breaks")
-
-code_sample_cc(name = "vrp_capacity")
-
-code_sample_cc(name = "vrp_drop_nodes")
-
-code_sample_cc(name = "vrp_global_span")
-
-code_sample_cc(name = "vrp_initial_routes")
-
-code_sample_cc(name = "vrp_pickup_delivery")
-
-code_sample_cc(name = "vrp_pickup_delivery_fifo")
-
-code_sample_cc(name = "vrp_pickup_delivery_lifo")
-
-code_sample_cc(name = "vrp_resources")
-
-code_sample_cc(name = "vrp_starts_ends")
-
-code_sample_cc(name = "vrp_time_windows")
-
-code_sample_cc(name = "vrp_with_time_limit")
diff --git a/ortools/constraint_solver/samples/CMakeLists.txt b/ortools/constraint_solver/samples/CMakeLists.txt
index ee48e3d3dae..8bb2b71af62 100644
--- a/ortools/constraint_solver/samples/CMakeLists.txt
+++ b/ortools/constraint_solver/samples/CMakeLists.txt
@@ -17,9 +17,6 @@ endif()
if(BUILD_CXX_SAMPLES)
file(GLOB CXX_SRCS "*.cc")
- list(FILTER CXX_SRCS EXCLUDE REGEX "/cvrp_disjoint_tw")
- list(FILTER CXX_SRCS EXCLUDE REGEX "/cvrptw\.cc")
- list(FILTER CXX_SRCS EXCLUDE REGEX "/cvrptw_")
foreach(SAMPLE IN LISTS CXX_SRCS)
add_cxx_sample(FILE_NAME ${SAMPLE})
endforeach()
diff --git a/ortools/constraint_solver/samples/code_samples.bzl b/ortools/constraint_solver/samples/code_samples.bzl
index 55ceddc80c0..84bf1094687 100644
--- a/ortools/constraint_solver/samples/code_samples.bzl
+++ b/ortools/constraint_solver/samples/code_samples.bzl
@@ -22,8 +22,6 @@ def code_sample_cc(name):
deps = [
"//ortools/base",
"//ortools/constraint_solver:cp",
- "//ortools/constraint_solver:routing",
- "//ortools/constraint_solver:routing_enums_cc_proto",
],
)
@@ -35,7 +33,5 @@ def code_sample_cc(name):
":" + name + "_cc",
"//ortools/base",
"//ortools/constraint_solver:cp",
- "//ortools/constraint_solver:routing",
- "//ortools/constraint_solver:routing_enums_cc_proto",
],
)
diff --git a/ortools/constraint_solver/samples/cp_is_fun_cp.py b/ortools/constraint_solver/samples/cp_is_fun_cp.py
index fbb230be132..c4e93da681e 100755
--- a/ortools/constraint_solver/samples/cp_is_fun_cp.py
+++ b/ortools/constraint_solver/samples/cp_is_fun_cp.py
@@ -22,76 +22,77 @@
"""
# [START import]
from ortools.constraint_solver import pywrapcp
+
# [END import]
def main():
- # Constraint programming engine
- # [START solver]
- solver = pywrapcp.Solver("CP is fun!")
- # [END solver]
+ # Constraint programming engine
+ # [START solver]
+ solver = pywrapcp.Solver("CP is fun!")
+ # [END solver]
- # [START variables]
- base = 10
+ # [START variables]
+ base = 10
- # Decision variables.
- digits = list(range(0, base))
- digits_without_zero = list(range(1, base))
- c = solver.IntVar(digits_without_zero, "C")
- p = solver.IntVar(digits, "P")
- i = solver.IntVar(digits_without_zero, "I")
- s = solver.IntVar(digits, "S")
- f = solver.IntVar(digits_without_zero, "F")
- u = solver.IntVar(digits, "U")
- n = solver.IntVar(digits, "N")
- t = solver.IntVar(digits_without_zero, "T")
- r = solver.IntVar(digits, "R")
- e = solver.IntVar(digits, "E")
+ # Decision variables.
+ digits = list(range(0, base))
+ digits_without_zero = list(range(1, base))
+ c = solver.IntVar(digits_without_zero, "C")
+ p = solver.IntVar(digits, "P")
+ i = solver.IntVar(digits_without_zero, "I")
+ s = solver.IntVar(digits, "S")
+ f = solver.IntVar(digits_without_zero, "F")
+ u = solver.IntVar(digits, "U")
+ n = solver.IntVar(digits, "N")
+ t = solver.IntVar(digits_without_zero, "T")
+ r = solver.IntVar(digits, "R")
+ e = solver.IntVar(digits, "E")
- # We need to group variables in a list to use the constraint AllDifferent.
- letters = [c, p, i, s, f, u, n, t, r, e]
+ # We need to group variables in a list to use the constraint AllDifferent.
+ letters = [c, p, i, s, f, u, n, t, r, e]
- # Verify that we have enough digits.
- assert base >= len(letters)
- # [END variables]
+ # Verify that we have enough digits.
+ assert base >= len(letters)
+ # [END variables]
- # Define constraints.
- # [START constraints]
- solver.Add(solver.AllDifferent(letters))
+ # Define constraints.
+ # [START constraints]
+ solver.Add(solver.AllDifferent(letters))
- # CP + IS + FUN = TRUE
- solver.Add(
- p + s + n + base * (c + i + u) + base * base * f
- == e + base * u + base * base * r + base * base * base * t
- )
- # [END constraints]
+ # CP + IS + FUN = TRUE
+ solver.Add(
+ p + s + n + base * (c + i + u) + base * base * f
+ == e + base * u + base * base * r + base * base * base * t
+ )
+ # [END constraints]
- # [START solve]
- solution_count = 0
- db = solver.Phase(letters, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
- solver.NewSearch(db)
- while solver.NextSolution():
- print(letters)
- # Is CP + IS + FUN = TRUE?
- assert (
- base * c.Value()
- + p.Value()
- + base * i.Value()
- + s.Value()
- + base * base * f.Value()
- + base * u.Value()
- + n.Value()
- == base * base * base * t.Value()
- + base * base * r.Value()
- + base * u.Value()
- + e.Value()
- )
- solution_count += 1
- solver.EndSearch()
- print(f"Number of solutions found: {solution_count}")
- # [END solve]
+ # [START solve]
+ solution_count = 0
+ db = solver.Phase(letters, solver.INT_VAR_DEFAULT, solver.INT_VALUE_DEFAULT)
+ solver.NewSearch(db)
+ while solver.NextSolution():
+ print(letters)
+ # Is CP + IS + FUN = TRUE?
+ assert (
+ base * c.Value()
+ + p.Value()
+ + base * i.Value()
+ + s.Value()
+ + base * base * f.Value()
+ + base * u.Value()
+ + n.Value()
+ == base * base * base * t.Value()
+ + base * base * r.Value()
+ + base * u.Value()
+ + e.Value()
+ )
+ solution_count += 1
+ solver.EndSearch()
+ print(f"Number of solutions found: {solution_count}")
+ # [END solve]
if __name__ == "__main__":
- main()
+ main()
# [END program]
diff --git a/ortools/constraint_solver/samples/cvrp_reload.py b/ortools/constraint_solver/samples/cvrp_reload.py
deleted file mode 100755
index fee9315eed5..00000000000
--- a/ortools/constraint_solver/samples/cvrp_reload.py
+++ /dev/null
@@ -1,429 +0,0 @@
-#!/usr/bin/env python3
-# This Python file uses the following encoding: utf-8
-# Copyright 2015 Tin Arm Engineering AB
-# Copyright 2018 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-"""Capacitated Vehicle Routing Problem (CVRP).
-
-This is a sample using the routing library python wrapper to solve a CVRP
-problem while allowing multiple trips, i.e., vehicles can return to a depot
-to reset their load ("reload").
-
-A description of the CVRP problem can be found here:
-http://en.wikipedia.org/wiki/Vehicle_routing_problem.
-
-Distances are in meters.
-
-To implement multiple trips, new nodes are introduced at the same location
-as the original depot. These additional nodes can be dropped from the
-schedule at zero cost.
-
-The max_slack parameter associated with the capacity constraint of every node
-can be set to the maximum of the vehicles' capacities, rather than 0 as in a
-traditional CVRP. Slack is required because, before a solution is found, it is
-not known how much capacity will be transferred at the new nodes. For all the
-other (original) nodes, the slack is then reset to 0.
-
-The above two considerations are implemented in `add_capacity_constraints()`.
-
-Last, it is useful to set a large distance between the initial depot and the
-new nodes introduced, to avoid schedules having spurious transits through
-those new nodes unless it's necessary to reload. This consideration is taken
-into account in `create_distance_evaluator()`.
-"""
-
-from functools import partial
-
-from ortools.constraint_solver import pywrapcp
-from ortools.constraint_solver import routing_enums_pb2
-
-
-###########################
-# Problem Data Definition #
-###########################
-def create_data_model():
- """Stores the data for the problem"""
- data = {}
- _capacity = 15
- # Locations in block unit
- _locations = [
- (4, 4), # depot
- (4, 4), # unload depot_first
- (4, 4), # unload depot_second
- (4, 4), # unload depot_third
- (4, 4), # unload depot_fourth
- (4, 4), # unload depot_fifth
- (2, 0),
- (8, 0), # locations to visit
- (0, 1),
- (1, 1),
- (5, 2),
- (7, 2),
- (3, 3),
- (6, 3),
- (5, 5),
- (8, 5),
- (1, 6),
- (2, 6),
- (3, 7),
- (6, 7),
- (0, 8),
- (7, 8),
- ]
- # Compute locations in meters using the block dimension defined as follows
- # Manhattan average block: 750ft x 264ft -> 228m x 80m
- # here we use: 114m x 80m city block
- # src: https://nyti.ms/2GDoRIe 'NY Times: Know Your distance'
- data["locations"] = [(l[0] * 114, l[1] * 80) for l in _locations]
- data["num_locations"] = len(data["locations"])
- data["demands"] = [
- 0, # depot
- -_capacity, # unload depot_first
- -_capacity, # unload depot_second
- -_capacity, # unload depot_third
- -_capacity, # unload depot_fourth
- -_capacity, # unload depot_fifth
- 3,
- 3, # 1, 2
- 3,
- 4, # 3, 4
- 3,
- 4, # 5, 6
- 8,
- 8, # 7, 8
- 3,
- 3, # 9,10
- 3,
- 3, # 11,12
- 4,
- 4, # 13, 14
- 8,
- 8,
- ] # 15, 16
- data["time_per_demand_unit"] = 5 # 5 minutes/unit
- data["time_windows"] = [
- (0, 0), # depot
- (0, 1000), # unload depot_first
- (0, 1000), # unload depot_second
- (0, 1000), # unload depot_third
- (0, 1000), # unload depot_fourth
- (0, 1000), # unload depot_fifth
- (75, 850),
- (75, 850), # 1, 2
- (60, 700),
- (45, 550), # 3, 4
- (0, 800),
- (50, 600), # 5, 6
- (0, 1000),
- (10, 200), # 7, 8
- (0, 1000),
- (75, 850), # 9, 10
- (85, 950),
- (5, 150), # 11, 12
- (15, 250),
- (10, 200), # 13, 14
- (45, 550),
- (30, 400),
- ] # 15, 16
- data["num_vehicles"] = 3
- data["vehicle_capacity"] = _capacity
- data["vehicle_max_distance"] = 10_000
- data["vehicle_max_time"] = 1_500
- data["vehicle_speed"] = 5 * 60 / 3.6 # Travel speed: 5km/h to convert in m/min
- data["depot"] = 0
- return data
-
-
-#######################
-# Problem Constraints #
-#######################
-def manhattan_distance(position_1, position_2):
- """Computes the Manhattan distance between two points"""
- return abs(position_1[0] - position_2[0]) + abs(position_1[1] - position_2[1])
-
-
-def create_distance_evaluator(data):
- """Creates callback to return distance between points."""
- _distances = {}
- # precompute distance between location to have distance callback in O(1)
- for from_node in range(data["num_locations"]):
- _distances[from_node] = {}
- for to_node in range(data["num_locations"]):
- if from_node == to_node:
- _distances[from_node][to_node] = 0
- # Forbid start/end/reload node to be consecutive.
- elif from_node in range(6) and to_node in range(6):
- _distances[from_node][to_node] = data["vehicle_max_distance"]
- else:
- _distances[from_node][to_node] = manhattan_distance(
- data["locations"][from_node], data["locations"][to_node]
- )
-
- def distance_evaluator(manager, from_node, to_node):
- """Returns the manhattan distance between the two nodes"""
- return _distances[manager.IndexToNode(from_node)][manager.IndexToNode(to_node)]
-
- return distance_evaluator
-
-
-def add_distance_dimension(routing, manager, data, distance_evaluator_index):
- """Add Global Span constraint"""
- del manager
- distance = "Distance"
- routing.AddDimension(
- distance_evaluator_index,
- 0, # null slack
- data["vehicle_max_distance"], # maximum distance per vehicle
- True, # start cumul to zero
- distance,
- )
- distance_dimension = routing.GetDimensionOrDie(distance)
- # Try to minimize the max distance among vehicles.
- # /!\ It doesn't mean the standard deviation is minimized
- distance_dimension.SetGlobalSpanCostCoefficient(100)
-
-
-def create_demand_evaluator(data):
- """Creates callback to get demands at each location."""
- _demands = data["demands"]
-
- def demand_evaluator(manager, from_node):
- """Returns the demand of the current node"""
- return _demands[manager.IndexToNode(from_node)]
-
- return demand_evaluator
-
-
-def add_capacity_constraints(routing, manager, data, demand_evaluator_index):
- """Adds capacity constraint"""
- vehicle_capacity = data["vehicle_capacity"]
- capacity = "Capacity"
- routing.AddDimension(
- demand_evaluator_index,
- vehicle_capacity,
- vehicle_capacity,
- True, # start cumul to zero
- capacity,
- )
-
- # Add slack so the load can be reset to zero at the unload depot nodes.
- # e.g. vehicle with load 10/15 arrives at node 1 (depot unload)
- # so we have CumulVar = 10(current load) + -15(unload) + 5(slack) = 0.
- capacity_dimension = routing.GetDimensionOrDie(capacity)
- # Allow reload nodes to be dropped at zero cost.
- for node in [1, 2, 3, 4, 5]:
- node_index = manager.NodeToIndex(node)
- routing.AddDisjunction([node_index], 0)
-
- # Allow regular nodes to be dropped, at a cost.
- for node in range(6, len(data["demands"])):
- node_index = manager.NodeToIndex(node)
- capacity_dimension.SlackVar(node_index).SetValue(0)
- routing.AddDisjunction([node_index], 100_000)
-
-
-def create_time_evaluator(data):
- """Creates callback to get total times between locations."""
-
- def service_time(data, node):
- """Gets the service time for the specified location."""
- return abs(data["demands"][node]) * data["time_per_demand_unit"]
-
- def travel_time(data, from_node, to_node):
- """Gets the travel times between two locations."""
- if from_node == to_node:
- travel_time = 0
- else:
- travel_time = (
- manhattan_distance(
- data["locations"][from_node], data["locations"][to_node]
- )
- / data["vehicle_speed"]
- )
- return travel_time
-
- _total_time = {}
- # precompute total time to have time callback in O(1)
- for from_node in range(data["num_locations"]):
- _total_time[from_node] = {}
- for to_node in range(data["num_locations"]):
- if from_node == to_node:
- _total_time[from_node][to_node] = 0
- else:
- _total_time[from_node][to_node] = int(
- service_time(data, from_node)
- + travel_time(data, from_node, to_node)
- )
-
- def time_evaluator(manager, from_node, to_node):
- """Returns the total time between the two nodes"""
- return _total_time[manager.IndexToNode(from_node)][manager.IndexToNode(to_node)]
-
- return time_evaluator
-
-
-def add_time_window_constraints(routing, manager, data, time_evaluator):
- """Add Time windows constraint"""
- time = "Time"
- max_time = data["vehicle_max_time"]
- routing.AddDimension(
- time_evaluator,
- max_time, # allow waiting time
- max_time, # maximum time per vehicle
- False, # don't force start cumul to zero since we are giving TW to start nodes
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot
- # and 'copy' the slack var in the solution object (aka Assignment) to print it
- for location_idx, time_window in enumerate(data["time_windows"]):
- if location_idx == 0:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- routing.AddToAssignment(time_dimension.SlackVar(index))
- # Add time window constraints for each vehicle start node
- # and 'copy' the slack var in the solution object (aka Assignment) to print it
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(
- data["time_windows"][0][0], data["time_windows"][0][1]
- )
- routing.AddToAssignment(time_dimension.SlackVar(index))
- # Warning: Slack var is not defined for vehicle's end node
- # routing.AddToAssignment(time_dimension.SlackVar(self.routing.End(vehicle_id)))
-
-
-###########
-# Printer #
-###########
-def print_solution(
- data, manager, routing, assignment
-): # pylint:disable=too-many-locals
- """Prints assignment on console"""
- print(f"Objective: {assignment.ObjectiveValue()}")
- total_distance = 0
- total_load = 0
- total_time = 0
- capacity_dimension = routing.GetDimensionOrDie("Capacity")
- time_dimension = routing.GetDimensionOrDie("Time")
- distance_dimension = routing.GetDimensionOrDie("Distance")
- dropped = []
- for order in range(6, routing.nodes()):
- index = manager.NodeToIndex(order)
- if assignment.Value(routing.NextVar(index)) == index:
- dropped.append(order)
- print(f"dropped orders: {dropped}")
- dropped = []
- for reload in range(1, 6):
- index = manager.NodeToIndex(reload)
- if assignment.Value(routing.NextVar(index)) == index:
- dropped.append(reload)
- print(f"dropped reload stations: {dropped}")
-
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- load_value = 0
- distance = 0
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f" {manager.IndexToNode(index)} "
- f"Load({assignment.Min(capacity_dimension.CumulVar(index))}) "
- f"Time({assignment.Min(time_var)},{assignment.Max(time_var)}) ->"
- )
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- distance += distance_dimension.GetTransitValue(previous_index, index, vehicle_id)
- # capacity dimension TransitVar is negative at reload stations during replenishment
- # don't want to consider those values when calculating the total load of the route
- # hence only considering the positive values
- load_value += max(0, capacity_dimension.GetTransitValue(previous_index, index, vehicle_id))
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f" {manager.IndexToNode(index)} "
- f"Load({assignment.Min(capacity_dimension.CumulVar(index))}) "
- f"Time({assignment.Min(time_var)},{assignment.Max(time_var)})\n"
- )
- plan_output += f"Distance of the route: {distance}m\n"
- plan_output += f"Load of the route: {load_value}\n"
- plan_output += f"Time of the route: {assignment.Min(time_var)}min\n"
- print(plan_output)
- total_distance += distance
- total_load += load_value
- total_time += assignment.Min(time_var)
- print(f"Total Distance of all routes: {total_distance}m")
- print(f"Total Load of all routes: {total_load}")
- print(f"Total Time of all routes: {total_time}min")
-
-
-########
-# Main #
-########
-def main():
- """Entry point of the program"""
- # Instantiate the data problem.
- data = create_data_model()
-
- # Create the routing index manager
- manager = pywrapcp.RoutingIndexManager(
- data["num_locations"], data["num_vehicles"], data["depot"]
- )
-
- # Create Routing Model
- routing = pywrapcp.RoutingModel(manager)
-
- # Define weight of each edge
- distance_evaluator_index = routing.RegisterTransitCallback(
- partial(create_distance_evaluator(data), manager)
- )
- routing.SetArcCostEvaluatorOfAllVehicles(distance_evaluator_index)
-
- # Add Distance constraint to minimize the longest route
- add_distance_dimension(routing, manager, data, distance_evaluator_index)
-
- # Add Capacity constraint
- demand_evaluator_index = routing.RegisterUnaryTransitCallback(
- partial(create_demand_evaluator(data), manager)
- )
- add_capacity_constraints(routing, manager, data, demand_evaluator_index)
-
- # Add Time Window constraint
- time_evaluator_index = routing.RegisterTransitCallback(
- partial(create_time_evaluator(data), manager)
- )
- add_time_window_constraints(routing, manager, data, time_evaluator_index)
-
- # Setting first solution heuristic (cheapest addition).
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- ) # pylint: disable=no-member
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(3)
-
- # Solve the problem.
- solution = routing.SolveWithParameters(search_parameters)
- if solution:
- print_solution(data, manager, routing, solution)
- else:
- print("No solution found !")
-
-
-if __name__ == "__main__":
- main()
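The removed cvrp_reload.py above models reloads by giving the Capacity dimension a slack as large as the vehicle capacity, letting the unload depot nodes be skipped for free, and pinning the slack back to zero at regular nodes. A minimal sketch of that step, assuming a manager/routing pair and a registered unary demand callback built as in the removed sample; the helper name add_reload_capacity is hypothetical, and the node split (reload nodes 1..5, customers from node 6 on) is that sample's convention, not a library requirement:

    def add_reload_capacity(routing, manager, demand_cb_index, data):
        """Capacity dimension that can be 'refilled' at the unload depot nodes."""
        capacity = data["vehicle_capacity"]
        routing.AddDimension(
            demand_cb_index,
            capacity,   # slack up to a full load, absorbed at reload nodes
            capacity,   # per-vehicle capacity
            True,       # start cumul at zero
            "Capacity")
        capacity_dimension = routing.GetDimensionOrDie("Capacity")
        # Reload nodes (negative demand) may be dropped at zero cost.
        for node in range(1, 6):
            routing.AddDisjunction([manager.NodeToIndex(node)], 0)
        # Regular nodes keep zero slack and are only dropped at a penalty.
        for node in range(6, len(data["demands"])):
            index = manager.NodeToIndex(node)
            capacity_dimension.SlackVar(index).SetValue(0)
            routing.AddDisjunction([index], 100_000)

The removed distance evaluator complements this by assigning the maximum route distance between any two of the nodes 0..5, which discourages back-to-back depot/reload visits unless a reload is actually needed.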
diff --git a/ortools/constraint_solver/samples/cvrptw_break.py b/ortools/constraint_solver/samples/cvrptw_break.py
deleted file mode 100755
index f0a391dbc26..00000000000
--- a/ortools/constraint_solver/samples/cvrptw_break.py
+++ /dev/null
@@ -1,367 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Capacitated Vehicle Routing Problem with Time Windows (CVRPTW).
-
-This is a sample using the routing library python wrapper to solve a CVRPTW
-problem.
-A description of the problem can be found here:
-http://en.wikipedia.org/wiki/Vehicle_routing_problem.
-
-Distances are in meters and time in minutes.
-"""
-
-# [START import]
-import functools
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- # Locations in block unit
- locations_ = [
- # fmt: off
- (4, 4), # depot
- (2, 0), (8, 0), # locations to visit
- (0, 1), (1, 1),
- (5, 2), (7, 2),
- (3, 3), (6, 3),
- (5, 5), (8, 5),
- (1, 6), (2, 6),
- (3, 7), (6, 7),
- (0, 8), (7, 8),
- # fmt: on
- ]
- # Compute locations in meters using the block dimension defined as follows
- # Manhattan average block: 750ft x 264ft -> 228m x 80m
- # here we use: 114m x 80m city block
- # src: https://nyti.ms/2GDoRIe "NY Times: Know Your distance"
- data["locations"] = [(l[0] * 114, l[1] * 80) for l in locations_]
- data["numlocations_"] = len(data["locations"])
- data["time_windows"] = [
- # fmt: off
- (0, 0), # depot
- (75, 85), (75, 85), # 1, 2
- (60, 70), (45, 55), # 3, 4
- (0, 8), (50, 60), # 5, 6
- (0, 10), (10, 20), # 7, 8
- (0, 10), (75, 85), # 9, 10
- (85, 95), (5, 15), # 11, 12
- (15, 25), (10, 20), # 13, 14
- (45, 55), (30, 40),
- # 15, 16
- # fmt: on
- ]
- data["demands"] = [
- # fmt: off
- 0, # depot
- 1, 1, # 1, 2
- 2, 4, # 3, 4
- 2, 4, # 5, 6
- 8, 8, # 7, 8
- 1, 2, # 9, 10
- 1, 2, # 11, 12
- 4, 4, # 13, 14
- 8, 8,
- # 15, 16
- # fmt: on
- ]
- data["time_per_demand_unit"] = 5 # 5 minutes/unit
- data["num_vehicles"] = 4
- data["breaks"] = [(2, False), (2, False), (2, False), (2, False)]
- data["vehicle_capacity"] = 15
- data["vehicle_speed"] = 83 # Travel speed: 5km/h converted in m/min
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-def manhattan_distance(position_1, position_2):
- """Computes the Manhattan distance between two points."""
- return abs(position_1[0] - position_2[0]) + abs(position_1[1] - position_2[1])
-
-
-def create_distance_evaluator(data):
- """Creates callback to return distance between points."""
- distances_ = {}
- # precompute distance between location to have distance callback in O(1)
- for from_node in range(data["numlocations_"]):
- distances_[from_node] = {}
- for to_node in range(data["numlocations_"]):
- if from_node == to_node:
- distances_[from_node][to_node] = 0
- else:
- distances_[from_node][to_node] = manhattan_distance(
- data["locations"][from_node], data["locations"][to_node]
- )
-
- def distance_evaluator(manager, from_node, to_node):
- """Returns the manhattan distance between the two nodes."""
- return distances_[manager.IndexToNode(from_node)][manager.IndexToNode(to_node)]
-
- return distance_evaluator
-
-
-def create_demand_evaluator(data):
- """Creates callback to get demands at each location."""
- demands_ = data["demands"]
-
- def demand_evaluator(manager, node):
- """Returns the demand of the current node."""
- return demands_[manager.IndexToNode(node)]
-
- return demand_evaluator
-
-
-def add_capacity_constraints(routing, data, demand_evaluator_index):
- """Adds capacity constraint."""
- capacity = "Capacity"
- routing.AddDimension(
- demand_evaluator_index,
- 0, # null capacity slack
- data["vehicle_capacity"],
- True, # start cumul to zero
- capacity,
- )
-
-
-def create_time_evaluator(data):
- """Creates callback to get total times between locations."""
-
- def service_time(data, node):
- """Gets the service time for the specified location."""
- return data["demands"][node] * data["time_per_demand_unit"]
-
- def travel_time(data, from_node, to_node):
- """Gets the travel times between two locations."""
- if from_node == to_node:
- travel_time = 0
- else:
- travel_time = (
- manhattan_distance(
- data["locations"][from_node], data["locations"][to_node]
- )
- / data["vehicle_speed"]
- )
- return travel_time
-
- total_time_ = {}
- # precompute total time to have time callback in O(1)
- for from_node in range(data["numlocations_"]):
- total_time_[from_node] = {}
- for to_node in range(data["numlocations_"]):
- if from_node == to_node:
- total_time_[from_node][to_node] = 0
- else:
- total_time_[from_node][to_node] = int(
- service_time(data, from_node)
- + travel_time(data, from_node, to_node)
- )
-
- def time_evaluator(manager, from_node, to_node):
- """Returns the total time between the two nodes."""
- return total_time_[manager.IndexToNode(from_node)][manager.IndexToNode(to_node)]
-
- return time_evaluator
-
-
-def add_time_window_constraints(routing, manager, data, time_evaluator_index):
- """Add Global Span constraint."""
- time = "Time"
- horizon = 120
- routing.AddDimension(
- time_evaluator_index,
- horizon, # allow waiting time
- horizon, # maximum time per vehicle
- False, # don't force start cumul to zero
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot
- # and 'copy' the slack var in the solution object (aka Assignment) to print it
- for location_idx, time_window in enumerate(data["time_windows"]):
- if location_idx == data["depot"]:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- routing.AddToAssignment(time_dimension.SlackVar(index))
- # Add time window constraints for each vehicle start node
- # and 'copy' the slack var in the solution object (aka Assignment) to print it
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(
- data["time_windows"][0][0], data["time_windows"][0][1]
- )
- routing.AddToAssignment(time_dimension.SlackVar(index))
- # The time window at the end node was implicitly set in the time dimension
- # definition to be [0, horizon].
- # Warning: Slack var is not defined for vehicle end nodes and should not
- # be added to the assignment.
-
-
-# [START solution_printer]
-def print_solution(
- data, manager, routing, assignment
-): # pylint:disable=too-many-locals
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
-
- print("Breaks:")
- intervals = assignment.IntervalVarContainer()
- for i in range(intervals.Size()):
- brk = intervals.Element(i)
- if brk.PerformedValue() == 1:
- print(
- f"{brk.Var().Name()}:"
- f" Start({brk.StartValue()}) Duration({brk.DurationValue()})"
- )
- else:
- print(f"{brk.Var().Name()}: Unperformed")
-
- total_distance = 0
- total_load = 0
- total_time = 0
- capacity_dimension = routing.GetDimensionOrDie("Capacity")
- time_dimension = routing.GetDimensionOrDie("Time")
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- distance = 0
- while not routing.IsEnd(index):
- load_var = capacity_dimension.CumulVar(index)
- time_var = time_dimension.CumulVar(index)
- slack_var = time_dimension.SlackVar(index)
- node = manager.IndexToNode(index)
- plan_output += (
- f" {node}"
- f" Load({assignment.Value(load_var)})"
- f" Time({assignment.Min(time_var)}, {assignment.Max(time_var)})"
- f" Slack({assignment.Min(slack_var)}, {assignment.Max(slack_var)})"
- " ->"
- )
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- distance += routing.GetArcCostForVehicle(previous_index, index, vehicle_id)
- load_var = capacity_dimension.CumulVar(index)
- time_var = time_dimension.CumulVar(index)
- node = manager.IndexToNode(index)
- plan_output += (
- f" {node}"
- f" Load({assignment.Value(load_var)})"
- f" Time({assignment.Min(time_var)}, {assignment.Max(time_var)})\n"
- )
- plan_output += f"Distance of the route: {distance}m\n"
- plan_output += f"Load of the route: {assignment.Value(load_var)}\n"
- plan_output += f"Time of the route: {assignment.Value(time_var)}\n"
- print(plan_output)
- total_distance += distance
- total_load += assignment.Value(load_var)
- total_time += assignment.Value(time_var)
- print(f"Total Distance of all routes: {total_distance}m")
- print(f"Total Load of all routes: {total_load}")
- print(f"Total Time of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager
- manager = pywrapcp.RoutingIndexManager(
- data["numlocations_"], data["num_vehicles"], data["depot"]
- )
-
- # Create Routing Model
- routing = pywrapcp.RoutingModel(manager)
-
- # Define weight of each edge
- distance_evaluator_index = routing.RegisterTransitCallback(
- functools.partial(create_distance_evaluator(data), manager)
- )
- routing.SetArcCostEvaluatorOfAllVehicles(distance_evaluator_index)
-
- # Add Capacity constraint
- demand_evaluator_index = routing.RegisterUnaryTransitCallback(
- functools.partial(create_demand_evaluator(data), manager)
- )
- add_capacity_constraints(routing, data, demand_evaluator_index)
-
- # Add Time Window constraint
- time_evaluator_index = routing.RegisterTransitCallback(
- functools.partial(create_time_evaluator(data), manager)
- )
- add_time_window_constraints(routing, manager, data, time_evaluator_index)
-
- # Add breaks
- time_dimension = routing.GetDimensionOrDie("Time")
- node_visit_transit = {}
- for index in range(routing.Size()):
- node = manager.IndexToNode(index)
- node_visit_transit[index] = int(
- data["demands"][node] * data["time_per_demand_unit"]
- )
-
- break_intervals = {}
- for v in range(data["num_vehicles"]):
- vehicle_break = data["breaks"][v]
- break_intervals[v] = [
- routing.solver().FixedDurationIntervalVar(
- 15,
- 100,
- vehicle_break[0],
- vehicle_break[1],
- f"Break for vehicle {v}",
- )
- ]
- time_dimension.SetBreakIntervalsOfVehicle(
- break_intervals[v], v, node_visit_transit.values()
- )
-
- # Setting first solution heuristic (cheapest addition).
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- ) # pylint: disable=no-member
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(data, manager, routing, assignment)
- else:
- print("No solution found!")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
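The removed cvrptw_break.py above attaches one break interval per vehicle to the Time dimension: each break is a fixed-duration interval that may start anywhere within a window, and the per-index visit durations tell the solver how much transit cannot be interrupted by a break. A minimal sketch of that step, assuming the Time dimension and the node_visit_transit map have been built as in the removed sample; the helper name add_vehicle_breaks is hypothetical, and the data keys follow that sample:

    def add_vehicle_breaks(routing, data, node_visit_transit):
        """One fixed-duration break per vehicle on the Time dimension."""
        time_dimension = routing.GetDimensionOrDie("Time")
        for vehicle in range(data["num_vehicles"]):
            duration, optional = data["breaks"][vehicle]
            break_interval = routing.solver().FixedDurationIntervalVar(
                15,        # earliest break start
                100,       # latest break start
                duration,  # break length
                optional,  # True would let the solver leave the break unperformed
                f"Break for vehicle {vehicle}")
            time_dimension.SetBreakIntervalsOfVehicle(
                [break_interval], vehicle, list(node_visit_transit.values()))

The removed solution printer then reads each break back from the assignment's IntervalVarContainer to report whether it was performed and when it starts.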
diff --git a/ortools/constraint_solver/samples/nqueens_cp.py b/ortools/constraint_solver/samples/nqueens_cp.py
index eed0d217b24..2002beb4d52 100755
--- a/ortools/constraint_solver/samples/nqueens_cp.py
+++ b/ortools/constraint_solver/samples/nqueens_cp.py
@@ -17,68 +17,73 @@
# [START import]
import sys
from ortools.constraint_solver import pywrapcp
+
# [END import]
def main(board_size):
- # Creates the solver.
- # [START solver]
- solver = pywrapcp.Solver("n-queens")
- # [END solver]
+ # Creates the solver.
+ # [START solver]
+ solver = pywrapcp.Solver("n-queens")
+ # [END solver]
- # Creates the variables.
- # [START variables]
- # The array index is the column, and the value is the row.
- queens = [solver.IntVar(0, board_size - 1, f"x{i}") for i in range(board_size)]
- # [END variables]
+ # Creates the variables.
+ # [START variables]
+ # The array index is the column, and the value is the row.
+ queens = [
+ solver.IntVar(0, board_size - 1, f"x{i}") for i in range(board_size)
+ ]
+ # [END variables]
- # Creates the constraints.
- # [START constraints]
- # All rows must be different.
- solver.Add(solver.AllDifferent(queens))
+ # Creates the constraints.
+ # [START constraints]
+ # All rows must be different.
+ solver.Add(solver.AllDifferent(queens))
- # No two queens can be on the same diagonal.
- solver.Add(solver.AllDifferent([queens[i] + i for i in range(board_size)]))
- solver.Add(solver.AllDifferent([queens[i] - i for i in range(board_size)]))
- # [END constraints]
+ # No two queens can be on the same diagonal.
+ solver.Add(solver.AllDifferent([queens[i] + i for i in range(board_size)]))
+ solver.Add(solver.AllDifferent([queens[i] - i for i in range(board_size)]))
+ # [END constraints]
- # [START db]
- db = solver.Phase(queens, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE)
- # [END db]
+ # [START db]
+ db = solver.Phase(
+ queens, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ # [END db]
- # [START solve]
- # Iterates through the solutions, displaying each.
- num_solutions = 0
- solver.NewSearch(db)
- while solver.NextSolution():
- # Displays the solution just computed.
- for i in range(board_size):
- for j in range(board_size):
- if queens[j].Value() == i:
- # There is a queen in column j, row i.
- print("Q", end=" ")
- else:
- print("_", end=" ")
- print()
- print()
- num_solutions += 1
- solver.EndSearch()
- # [END solve]
+ # [START solve]
+ # Iterates through the solutions, displaying each.
+ num_solutions = 0
+ solver.NewSearch(db)
+ while solver.NextSolution():
+ # Displays the solution just computed.
+ for i in range(board_size):
+ for j in range(board_size):
+ if queens[j].Value() == i:
+ # There is a queen in column j, row i.
+ print("Q", end=" ")
+ else:
+ print("_", end=" ")
+ print()
+ print()
+ num_solutions += 1
+ solver.EndSearch()
+ # [END solve]
- # Statistics.
- # [START statistics]
- print("\nStatistics")
- print(f" failures: {solver.Failures()}")
- print(f" branches: {solver.Branches()}")
- print(f" wall time: {solver.WallTime()} ms")
- print(f" Solutions found: {num_solutions}")
- # [END statistics]
+ # Statistics.
+ # [START statistics]
+ print("\nStatistics")
+ print(f" failures: {solver.Failures()}")
+ print(f" branches: {solver.Branches()}")
+ print(f" wall time: {solver.WallTime()} ms")
+ print(f" Solutions found: {num_solutions}")
+ # [END statistics]
if __name__ == "__main__":
- # By default, solve the 8x8 problem.
- size = 8
- if len(sys.argv) > 1:
- size = int(sys.argv[1])
- main(size)
+ # By default, solve the 8x8 problem.
+ size = 8
+ if len(sys.argv) > 1:
+ size = int(sys.argv[1])
+ main(size)
# [END program]
diff --git a/ortools/constraint_solver/samples/simple_cp_program.py b/ortools/constraint_solver/samples/simple_cp_program.py
index 7c627995589..3e5522ad0bf 100755
--- a/ortools/constraint_solver/samples/simple_cp_program.py
+++ b/ortools/constraint_solver/samples/simple_cp_program.py
@@ -17,58 +17,59 @@
# [START import]
from ortools.constraint_solver import pywrapcp
+
# [END import]
def main():
- """Entry point of the program."""
- # Instantiate the solver.
- # [START solver]
- solver = pywrapcp.Solver("CPSimple")
- # [END solver]
+ """Entry point of the program."""
+ # Instantiate the solver.
+ # [START solver]
+ solver = pywrapcp.Solver("CPSimple")
+ # [END solver]
- # Create the variables.
- # [START variables]
- num_vals = 3
- x = solver.IntVar(0, num_vals - 1, "x")
- y = solver.IntVar(0, num_vals - 1, "y")
- z = solver.IntVar(0, num_vals - 1, "z")
- # [END variables]
+ # Create the variables.
+ # [START variables]
+ num_vals = 3
+ x = solver.IntVar(0, num_vals - 1, "x")
+ y = solver.IntVar(0, num_vals - 1, "y")
+ z = solver.IntVar(0, num_vals - 1, "z")
+ # [END variables]
- # Constraint 0: x != y.
- # [START constraints]
- solver.Add(x != y)
- print("Number of constraints: ", solver.Constraints())
- # [END constraints]
+ # Constraint 0: x != y.
+ # [START constraints]
+ solver.Add(x != y)
+ print("Number of constraints: ", solver.Constraints())
+ # [END constraints]
- # Solve the problem.
- # [START solve]
- decision_builder = solver.Phase(
- [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
- )
- # [END solve]
+ # Solve the problem.
+ # [START solve]
+ decision_builder = solver.Phase(
+ [x, y, z], solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE
+ )
+ # [END solve]
- # Print solution on console.
- # [START print_solution]
- count = 0
- solver.NewSearch(decision_builder)
- while solver.NextSolution():
- count += 1
- solution = f"Solution {count}:\n"
- for var in [x, y, z]:
- solution += f" {var.Name()} = {var.Value()}"
- print(solution)
- solver.EndSearch()
- print(f"Number of solutions found: {count}")
- # [END print_solution]
+ # Print solution on console.
+ # [START print_solution]
+ count = 0
+ solver.NewSearch(decision_builder)
+ while solver.NextSolution():
+ count += 1
+ solution = f"Solution {count}:\n"
+ for var in [x, y, z]:
+ solution += f" {var.Name()} = {var.Value()}"
+ print(solution)
+ solver.EndSearch()
+ print(f"Number of solutions found: {count}")
+ # [END print_solution]
- # [START advanced]
- print("Advanced usage:")
- print(f"Problem solved in {solver.WallTime()}ms")
- print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes")
- # [END advanced]
+ # [START advanced]
+ print("Advanced usage:")
+ print(f"Problem solved in {solver.WallTime()}ms")
+ print(f"Memory usage: {pywrapcp.Solver.MemoryUsage()}bytes")
+ # [END advanced]
if __name__ == "__main__":
- main()
+ main()
# [END program]
diff --git a/ortools/constraint_solver/samples/simple_routing_program.py b/ortools/constraint_solver/samples/simple_routing_program.py
deleted file mode 100755
index 04c7abc9884..00000000000
--- a/ortools/constraint_solver/samples/simple_routing_program.py
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicle Routing example."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- num_locations = 5
- num_vehicles = 1
- depot = 0
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(num_locations, num_vehicles, depot)
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the absolute difference between the two nodes."""
- # Convert from routing variable Index to user NodeIndex.
- from_node = int(manager.IndexToNode(from_index))
- to_node = int(manager.IndexToNode(to_index))
- return abs(to_node - from_node)
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- ) # pylint: disable=no-member
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- print(f"Objective: {assignment.ObjectiveValue()}")
- index = routing.Start(0)
- plan_output = "Route for vehicle 0:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f"{manager.IndexToNode(index)} -> "
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/tsp.py b/ortools/constraint_solver/samples/tsp.py
deleted file mode 100755
index 6e100b8c755..00000000000
--- a/ortools/constraint_solver/samples/tsp.py
+++ /dev/null
@@ -1,152 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Travelling Salesman Problem.
-
-A description of the problem can be found here:
-http://en.wikipedia.org/wiki/Travelling_salesperson_problem.
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- # Locations in block units
- locations = [
- # fmt:off
- (4, 4), # depot
- (2, 0), (8, 0), # locations to visit
- (0, 1), (1, 1),
- (5, 2), (7, 2),
- (3, 3), (6, 3),
- (5, 5), (8, 5),
- (1, 6), (2, 6),
- (3, 7), (6, 7),
- (0, 8), (7, 8)
- # fmt:on
- ]
- # Convert locations in meters using a city block dimension of 114m x 80m.
- data["locations"] = [(l[0] * 114, l[1] * 80) for l in locations]
- data["num_vehicles"] = 1
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START distance_callback]
-def create_distance_callback(data, manager):
- """Creates callback to return distance between points."""
- distances_ = {}
- index_manager_ = manager
- # precompute distance between location to have distance callback in O(1)
- for from_counter, from_node in enumerate(data["locations"]):
- distances_[from_counter] = {}
- for to_counter, to_node in enumerate(data["locations"]):
- if from_counter == to_counter:
- distances_[from_counter][to_counter] = 0
- else:
- distances_[from_counter][to_counter] = abs(
- from_node[0] - to_node[0]
- ) + abs(from_node[1] - to_node[1])
-
- def distance_callback(from_index, to_index):
- """Returns the manhattan distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = index_manager_.IndexToNode(from_index)
- to_node = index_manager_.IndexToNode(to_index)
- return distances_[from_node][to_node]
-
- return distance_callback
- # [END distance_callback]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, assignment):
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- index = routing.Start(0)
- plan_output = "Route for vehicle 0:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} ->"
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
- plan_output += f" {manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["locations"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- distance_callback = create_distance_callback(data, manager)
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(manager, routing, assignment)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/tsp_cities.py b/ortools/constraint_solver/samples/tsp_cities.py
deleted file mode 100755
index c17007fdb95..00000000000
--- a/ortools/constraint_solver/samples/tsp_cities.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Travelling Salesperson Problem (TSP) between cities."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- [0, 2451, 713, 1018, 1631, 1374, 2408, 213, 2571, 875, 1420, 2145, 1972],
- [2451, 0, 1745, 1524, 831, 1240, 959, 2596, 403, 1589, 1374, 357, 579],
- [713, 1745, 0, 355, 920, 803, 1737, 851, 1858, 262, 940, 1453, 1260],
- [1018, 1524, 355, 0, 700, 862, 1395, 1123, 1584, 466, 1056, 1280, 987],
- [1631, 831, 920, 700, 0, 663, 1021, 1769, 949, 796, 879, 586, 371],
- [1374, 1240, 803, 862, 663, 0, 1681, 1551, 1765, 547, 225, 887, 999],
- [2408, 959, 1737, 1395, 1021, 1681, 0, 2493, 678, 1724, 1891, 1114, 701],
- [213, 2596, 851, 1123, 1769, 1551, 2493, 0, 2699, 1038, 1605, 2300, 2099],
- [2571, 403, 1858, 1584, 949, 1765, 678, 2699, 0, 1744, 1645, 653, 600],
- [875, 1589, 262, 466, 796, 547, 1724, 1038, 1744, 0, 679, 1272, 1162],
- [1420, 1374, 940, 1056, 879, 225, 1891, 1605, 1645, 679, 0, 1017, 1200],
- [2145, 357, 1453, 1280, 586, 887, 1114, 2300, 653, 1272, 1017, 0, 504],
- [1972, 579, 1260, 987, 371, 999, 701, 2099, 600, 1162, 1200, 504, 0],
- ]
- data["num_vehicles"] = 1
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()} miles")
- index = routing.Start(0)
- plan_output = "Route for vehicle 0:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} ->"
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
- plan_output += f" {manager.IndexToNode(index)}\n"
- plan_output += f"Route distance: {route_distance}miles\n"
- print(plan_output)
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/tsp_distance_matrix.py b/ortools/constraint_solver/samples/tsp_distance_matrix.py
deleted file mode 100755
index ff3c5ef2840..00000000000
--- a/ortools/constraint_solver/samples/tsp_distance_matrix.py
+++ /dev/null
@@ -1,132 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Travelling Salesman Problem."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["num_vehicles"] = 1
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- index = routing.Start(0)
- plan_output = "Route for vehicle 0:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} ->"
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(previous_index, index, 0)
- plan_output += f" {manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp.py b/ortools/constraint_solver/samples/vrp.py
deleted file mode 100755
index 2a4d857a344..00000000000
--- a/ortools/constraint_solver/samples/vrp.py
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Vehicles Routing Problem (VRP).
-
-This is a sample using the routing library python wrapper to solve a VRP
-problem.
-A description of the problem can be found here:
-http://en.wikipedia.org/wiki/Vehicle_routing_problem.
-
-Distances are in meters.
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- total_distance = 0
- for vehicle_index in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(solution, vehicle_index):
- continue
- index = routing.Start(vehicle_index)
- plan_output = f"Route for vehicle {vehicle_index}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} ->"
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_index
- )
- plan_output += f" {manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- total_distance += route_distance
- print(f"Total Distance of all routes: {total_distance}m")
-
-# [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_breaks.py b/ortools/constraint_solver/samples/vrp_breaks.py
deleted file mode 100755
index 23139203635..00000000000
--- a/ortools/constraint_solver/samples/vrp_breaks.py
+++ /dev/null
@@ -1,206 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicle Routing Problem (VRP) with breaks.
-
- This is a sample using the routing library python wrapper to solve a VRP
- problem.
- A description of the problem can be found here:
- http://en.wikipedia.org/wiki/Vehicle_routing_problem.
-
- Durations are in minutes.
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["num_vehicles"] = 4
- data["depot"] = 0
- data["time_matrix"] = [
- [0, 27, 38, 34, 29, 13, 25, 9, 15, 9, 26, 25, 19, 17, 23, 38, 33],
- [27, 0, 34, 15, 9, 25, 36, 17, 34, 37, 54, 29, 24, 33, 50, 43, 60],
- [38, 34, 0, 49, 43, 25, 13, 40, 23, 37, 20, 63, 58, 56, 39, 77, 37],
- [34, 15, 49, 0, 5, 32, 43, 25, 42, 44, 61, 25, 31, 41, 58, 28, 67],
- [29, 9, 43, 5, 0, 26, 38, 19, 36, 38, 55, 20, 25, 35, 52, 33, 62],
- [13, 25, 25, 32, 26, 0, 11, 15, 9, 12, 29, 38, 33, 31, 25, 52, 35],
- [25, 36, 13, 43, 38, 11, 0, 26, 9, 23, 17, 50, 44, 42, 25, 63, 24],
- [9, 17, 40, 25, 19, 15, 26, 0, 17, 19, 36, 23, 17, 16, 33, 37, 42],
- [15, 34, 23, 42, 36, 9, 9, 17, 0, 13, 19, 40, 34, 33, 16, 54, 25],
- [9, 37, 37, 44, 38, 12, 23, 19, 13, 0, 17, 26, 21, 19, 13, 40, 23],
- [26, 54, 20, 61, 55, 29, 17, 36, 19, 17, 0, 43, 38, 36, 19, 57, 17],
- [25, 29, 63, 25, 20, 38, 50, 23, 40, 26, 43, 0, 5, 15, 32, 13, 42],
- [19, 24, 58, 31, 25, 33, 44, 17, 34, 21, 38, 5, 0, 9, 26, 19, 36],
- [17, 33, 56, 41, 35, 31, 42, 16, 33, 19, 36, 15, 9, 0, 17, 21, 26],
- [23, 50, 39, 58, 52, 25, 25, 33, 16, 13, 19, 32, 26, 17, 0, 38, 9],
- [38, 43, 77, 28, 33, 52, 63, 37, 54, 40, 57, 13, 19, 21, 38, 0, 39],
- [33, 60, 37, 67, 62, 35, 24, 42, 25, 23, 17, 42, 36, 26, 9, 39, 0],
- ]
- # 15 min of service time
- data["service_time"] = [15] * len(data["time_matrix"])
- data["service_time"][data["depot"]] = 0
- assert len(data["time_matrix"]) == len(data["service_time"])
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
-
- print("Breaks:")
- intervals = solution.IntervalVarContainer()
- for i in range(intervals.Size()):
- brk = intervals.Element(i)
- if brk.PerformedValue():
- print(
- f"{brk.Var().Name()}: "
- + f"Start({brk.StartValue()}) Duration({brk.DurationValue()})"
- )
- else:
- print(f"{brk.Var().Name()}: Unperformed")
-
- time_dimension = routing.GetDimensionOrDie("Time")
- total_time = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- plan_output += f"{manager.IndexToNode(index)} "
- plan_output += f"Time({solution.Value(time_var)}) -> "
- index = solution.Value(routing.NextVar(index))
- time_var = time_dimension.CumulVar(index)
- plan_output += f"{manager.IndexToNode(index)} "
- plan_output += f"Time({solution.Value(time_var)})\n"
- plan_output += f"Time of the route: {solution.Value(time_var)}min\n"
- print(plan_output)
- total_time += solution.Value(time_var)
- print(f"Total time of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["time_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time + service time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["time_matrix"][from_node][to_node] + data["service_time"][from_node]
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- time = "Time"
- routing.AddDimension(
- transit_callback_index,
- 10, # needed optional waiting time to place break
- 180, # maximum time per vehicle
- True, # Force start cumul to zero.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- time_dimension.SetGlobalSpanCostCoefficient(10)
-
- # Breaks
- # [START break_constraint]
- # warning: Need a pre-travel array using the solver's index order.
- node_visit_transit = [0] * routing.Size()
- for index in range(routing.Size()):
- node = manager.IndexToNode(index)
- node_visit_transit[index] = data["service_time"][node]
-
- break_intervals = {}
- for v in range(manager.GetNumberOfVehicles()):
- break_intervals[v] = [
- routing.solver().FixedDurationIntervalVar(
- 50, # start min
- 60, # start max
- 10, # duration: 10 min
- False, # optional: no
- f"Break for vehicle {v}",
- )
- ]
- time_dimension.SetBreakIntervalsOfVehicle(
- break_intervals[v], # breaks
- v, # vehicle index
- node_visit_transit,
- )
- # [END break_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- # search_parameters.log_search = True
- search_parameters.time_limit.FromSeconds(2)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_breaks_from_start.py b/ortools/constraint_solver/samples/vrp_breaks_from_start.py
deleted file mode 100755
index 0792b901cd7..00000000000
--- a/ortools/constraint_solver/samples/vrp_breaks_from_start.py
+++ /dev/null
@@ -1,214 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# [START program]
-"""Vehicles Routing Problem (VRP) with breaks relative to the vehicle start time.
-
-Each vehicle starts at T:15min, T:30min, T:45min and T:60min respectively.
-
-Each vehicle must perform a break lasting 5 minutes,
-starting between 25 and 45 minutes after route start.
-e.g. vehicle 2, starting at T:45min, must start a 5min break
-between [45+25, 45+45], i.e. in the range [70, 90].
-
-Durations are in minutes.
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["num_vehicles"] = 4
- data["depot"] = 0
- data["time_matrix"] = [
- [0, 27, 38, 34, 29, 13, 25, 9, 15, 9, 26, 25, 19, 17, 23, 38, 33],
- [27, 0, 34, 15, 9, 25, 36, 17, 34, 37, 54, 29, 24, 33, 50, 43, 60],
- [38, 34, 0, 49, 43, 25, 13, 40, 23, 37, 20, 63, 58, 56, 39, 77, 37],
- [34, 15, 49, 0, 5, 32, 43, 25, 42, 44, 61, 25, 31, 41, 58, 28, 67],
- [29, 9, 43, 5, 0, 26, 38, 19, 36, 38, 55, 20, 25, 35, 52, 33, 62],
- [13, 25, 25, 32, 26, 0, 11, 15, 9, 12, 29, 38, 33, 31, 25, 52, 35],
- [25, 36, 13, 43, 38, 11, 0, 26, 9, 23, 17, 50, 44, 42, 25, 63, 24],
- [9, 17, 40, 25, 19, 15, 26, 0, 17, 19, 36, 23, 17, 16, 33, 37, 42],
- [15, 34, 23, 42, 36, 9, 9, 17, 0, 13, 19, 40, 34, 33, 16, 54, 25],
- [9, 37, 37, 44, 38, 12, 23, 19, 13, 0, 17, 26, 21, 19, 13, 40, 23],
- [26, 54, 20, 61, 55, 29, 17, 36, 19, 17, 0, 43, 38, 36, 19, 57, 17],
- [25, 29, 63, 25, 20, 38, 50, 23, 40, 26, 43, 0, 5, 15, 32, 13, 42],
- [19, 24, 58, 31, 25, 33, 44, 17, 34, 21, 38, 5, 0, 9, 26, 19, 36],
- [17, 33, 56, 41, 35, 31, 42, 16, 33, 19, 36, 15, 9, 0, 17, 21, 26],
- [23, 50, 39, 58, 52, 25, 25, 33, 16, 13, 19, 32, 26, 17, 0, 38, 9],
- [38, 43, 77, 28, 33, 52, 63, 37, 54, 40, 57, 13, 19, 21, 38, 0, 39],
- [33, 60, 37, 67, 62, 35, 24, 42, 25, 23, 17, 42, 36, 26, 9, 39, 0],
- ]
- # 15 min of service time
- data["service_time"] = [15] * len(data["time_matrix"])
- data["service_time"][data["depot"]] = 0
- assert len(data["time_matrix"]) == len(data["service_time"])
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
-
- print("Breaks:")
- intervals = solution.IntervalVarContainer()
- for i in range(intervals.Size()):
- brk = intervals.Element(i)
- if brk.PerformedValue() == 1:
- print(
- f"{brk.Var().Name()}: "
- + f"Start({brk.StartValue()}) Duration({brk.DurationValue()})"
- )
- else:
- print(f"{brk.Var().Name()}: Unperformed")
-
- time_dimension = routing.GetDimensionOrDie("Time")
- total_time = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- if routing.IsStart(index):
- start_time = solution.Value(time_var)
- plan_output += f"{manager.IndexToNode(index)} "
- plan_output += f"Time({solution.Value(time_var)}) -> "
- index = solution.Value(routing.NextVar(index))
- time_var = time_dimension.CumulVar(index)
- plan_output += f"{manager.IndexToNode(index)} "
- plan_output += f"Time({solution.Value(time_var)})"
- print(plan_output)
- route_time = solution.Value(time_var) - start_time
- print(f"Time of the route: {route_time}min\n")
- total_time += route_time
- print(f"Total time of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["time_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["time_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- time = "Time"
- routing.AddDimension(
- transit_callback_index,
- 10, # need optional waiting time to place break
- 180, # maximum time per vehicle
- False, # Don't force start cumul to zero.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- time_dimension.SetGlobalSpanCostCoefficient(10)
-
- # Each vehicle start with a 15min delay
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetValue((vehicle_id + 1) * 15)
-
- # Add breaks
- # [START break_constraint]
- # warning: Need a pre-travel array using the solver's index order.
- node_visit_transit = [0] * routing.Size()
- for index in range(routing.Size()):
- node = manager.IndexToNode(index)
- node_visit_transit[index] = data["service_time"][node]
-
- # Add a break lasting 5 minutes, start between 25 and 45 minutes after route start
- for v in range(manager.GetNumberOfVehicles()):
- start_var = time_dimension.CumulVar(routing.Start(v))
- break_start = routing.solver().Sum([routing.solver().IntVar(25, 45), start_var])
-
- break_intervals = [
- routing.solver().FixedDurationIntervalVar(
- break_start, 5, f"Break for vehicle {v}"
- )
- ]
- time_dimension.SetBreakIntervalsOfVehicle(
- break_intervals, v, node_visit_transit
- )
- # [END break_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- # search_parameters.log_search = True
- search_parameters.time_limit.FromSeconds(2)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_capacity.py b/ortools/constraint_solver/samples/vrp_capacity.py
deleted file mode 100755
index f720a7f5b7f..00000000000
--- a/ortools/constraint_solver/samples/vrp_capacity.py
+++ /dev/null
@@ -1,173 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Capacited Vehicles Routing Problem (CVRP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START demands_capacities]
- data["demands"] = [0, 1, 1, 2, 4, 2, 4, 8, 8, 1, 2, 1, 2, 4, 4, 8, 8]
- data["vehicle_capacities"] = [15, 15, 15, 15]
- # [END demands_capacities]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- total_distance = 0
- total_load = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- route_load = 0
- while not routing.IsEnd(index):
- node_index = manager.IndexToNode(index)
- route_load += data["demands"][node_index]
- plan_output += f" {node_index} Load({route_load}) -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f" {manager.IndexToNode(index)} Load({route_load})\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- plan_output += f"Load of the route: {route_load}\n"
- print(plan_output)
- total_distance += route_distance
- total_load += route_load
- print(f"Total distance of all routes: {total_distance}m")
- print(f"Total load of all routes: {total_load}")
- # [END solution_printer]
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Capacity constraint.
- # [START capacity_constraint]
- def demand_callback(from_index):
- """Returns the demand of the node."""
- # Convert from routing variable Index to demands NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data["demands"][from_node]
-
- demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
- routing.AddDimensionWithVehicleCapacity(
- demand_callback_index,
- 0, # null capacity slack
- data["vehicle_capacities"], # vehicle maximum capacities
- True, # start cumul to zero
- "Capacity",
- )
- # [END capacity_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(1)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_drop_nodes.py b/ortools/constraint_solver/samples/vrp_drop_nodes.py
deleted file mode 100755
index cb62ed8e17b..00000000000
--- a/ortools/constraint_solver/samples/vrp_drop_nodes.py
+++ /dev/null
@@ -1,186 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Capacited Vehicles Routing Problem (CVRP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START demands_capacities]
- data["demands"] = [0, 1, 1, 3, 6, 3, 6, 8, 8, 1, 2, 1, 2, 6, 6, 8, 8]
- data["vehicle_capacities"] = [15, 15, 15, 15]
- # [END demands_capacities]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, assignment):
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- # Display dropped nodes.
- dropped_nodes = "Dropped nodes:"
- for node in range(routing.Size()):
- if routing.IsStart(node) or routing.IsEnd(node):
- continue
- if assignment.Value(routing.NextVar(node)) == node:
- dropped_nodes += f" {manager.IndexToNode(node)}"
- print(dropped_nodes)
- # Display routes
- total_distance = 0
- total_load = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- route_load = 0
- while not routing.IsEnd(index):
- node_index = manager.IndexToNode(index)
- route_load += data["demands"][node_index]
- plan_output += f" {node_index} Load({route_load}) -> "
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f" {manager.IndexToNode(index)} Load({route_load})\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- plan_output += f"Load of the route: {route_load}\n"
- print(plan_output)
- total_distance += route_distance
- total_load += route_load
- print(f"Total Distance of all routes: {total_distance}m")
- print(f"Total Load of all routes: {total_load}")
- # [END solution_printer]
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Capacity constraint.
- # [START capacity_constraint]
- def demand_callback(from_index):
- """Returns the demand of the node."""
- # Convert from routing variable Index to demands NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data["demands"][from_node]
-
- demand_callback_index = routing.RegisterUnaryTransitCallback(demand_callback)
- routing.AddDimensionWithVehicleCapacity(
- demand_callback_index,
- 0, # null capacity slack
- data["vehicle_capacities"], # vehicle maximum capacities
- True, # start cumul to zero
- "Capacity",
- )
- # Allow nodes to be dropped, at a penalty.
- penalty = 1000
- for node in range(1, len(data["distance_matrix"])):
- routing.AddDisjunction([manager.NodeToIndex(node)], penalty)
- # [END capacity_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(1)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(data, manager, routing, assignment)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_global_span.py b/ortools/constraint_solver/samples/vrp_global_span.py
deleted file mode 100755
index cb37a2ad951..00000000000
--- a/ortools/constraint_solver/samples/vrp_global_span.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Vehicles Routing Problem (VRP).
-
-This is a sample using the routing library python wrapper to solve a VRP
-problem.
-A description of the problem can be found here:
-http://en.wikipedia.org/wiki/Vehicle_routing_problem.
-
-Distances are in meters.
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- max_route_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- max_route_distance = max(route_distance, max_route_distance)
- print(f"Maximum of the route distances: {max_route_distance}m")
-
-# [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_initial_routes.py b/ortools/constraint_solver/samples/vrp_initial_routes.py
deleted file mode 100755
index a70634205b5..00000000000
--- a/ortools/constraint_solver/samples/vrp_initial_routes.py
+++ /dev/null
@@ -1,183 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicles Routing Problem (VRP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START initial_routes]
- data["initial_routes"] = [
- # fmt: off
- [8, 16, 14, 13, 12, 11],
- [3, 4, 9, 10],
- [15, 1],
- [7, 5, 2, 6],
- # fmt: on
- ]
- # [END initial_routes]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- max_route_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- max_route_distance = max(route_distance, max_route_distance)
- print(f"Maximum of the route distances: {max_route_distance}m")
-
-# [END solution_printer]
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Close model with the custom search parameters.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(5)
- # When an initial solution is given for search, the model will be closed with
- # the default search parameters unless it is explicitly closed with the custom
- # search parameters.
- routing.CloseModelWithParameters(search_parameters)
- # [END parameters]
-
- # Get initial solution from routes after closing the model.
- # [START print_initial_solution]
- initial_solution = routing.ReadAssignmentFromRoutes(data["initial_routes"], True)
- print("Initial solution:")
- print_solution(data, manager, routing, initial_solution)
- # [END print_initial_solution]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveFromAssignmentWithParameters(
- initial_solution, search_parameters
- )
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print("Solution after search:")
- print_solution(data, manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_items_to_deliver.py b/ortools/constraint_solver/samples/vrp_items_to_deliver.py
deleted file mode 100755
index c2223652600..00000000000
--- a/ortools/constraint_solver/samples/vrp_items_to_deliver.py
+++ /dev/null
@@ -1,603 +0,0 @@
-#!/usr/bin/env python3
-# [START program]
-"""Vehicles Routing Problem (VRP) for delivering items from any suppliers.
-
-Description: Need to deliver some item X and Y at end nodes (at least 11 X and
-13 Y). Several locations provide them and even few provide both.
-
-fleet:
- * vehicles: 2
- * x capacity: 15
- * y capacity: 15
- * start node: 0
- * end node: 1
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["num_vehicles"] = 2
- # [START starts_ends]
- data["starts"] = [0] * data["num_vehicles"]
- data["ends"] = [1] * data["num_vehicles"]
- assert len(data["starts"]) == data["num_vehicles"]
- assert len(data["ends"]) == data["num_vehicles"]
- # [END starts_ends]
-
- # [START demands_capacities]
- # Need 11 X and 13 Y
- data["providers_x"] = [
- 0, # start
- -11, # end
- 2, # X supply 1
- 2, # X supply 2
- 4, # X supply 3
- 4, # X supply 4
- 4, # X supply 5
- 5, # X supply 6
- 1, # X/Y supply 1
- 2, # X/Y supply 2
- 2, # X/Y supply 3
- 0, # Y supply 1
- 0, # Y supply 2
- 0, # Y supply 3
- 0, # Y supply 4
- 0, # Y supply 5
- 0, # Y supply 6
- ]
- data["providers_y"] = [
- 0, # start
- -13, # ends
- 0, # X supply 1
- 0, # X supply 2
- 0, # X supply 3
- 0, # X supply 4
- 0, # X supply 5
- 0, # X supply 6
- 3, # X/Y supply 1
- 2, # X/Y supply 2
- 1, # X/Y supply 3
- 3, # Y supply 1
- 3, # Y supply 2
- 3, # Y supply 3
- 3, # Y supply 4
- 3, # Y supply 5
- 5, # Y supply 6
- ]
- data["vehicle_capacities_x"] = [15] * data["num_vehicles"]
- data["vehicle_capacities_y"] = [15] * data["num_vehicles"]
- assert len(data["vehicle_capacities_x"]) == data["num_vehicles"]
- assert len(data["vehicle_capacities_y"]) == data["num_vehicles"]
- # [END demands_capacities]
- data["distance_matrix"] = [
- [
- 0,
- 548,
- 776,
- 696,
- 582,
- 274,
- 502,
- 194,
- 308,
- 194,
- 536,
- 502,
- 388,
- 354,
- 468,
- 776,
- 662,
- ],
- [
- 548,
- 0,
- 684,
- 308,
- 194,
- 502,
- 730,
- 354,
- 696,
- 742,
- 1084,
- 594,
- 480,
- 674,
- 1016,
- 868,
- 1210,
- ],
- [
- 776,
- 684,
- 0,
- 992,
- 878,
- 502,
- 274,
- 810,
- 468,
- 742,
- 400,
- 1278,
- 1164,
- 1130,
- 788,
- 1552,
- 754,
- ],
- [
- 696,
- 308,
- 992,
- 0,
- 114,
- 650,
- 878,
- 502,
- 844,
- 890,
- 1232,
- 514,
- 628,
- 822,
- 1164,
- 560,
- 1358,
- ],
- [
- 582,
- 194,
- 878,
- 114,
- 0,
- 536,
- 764,
- 388,
- 730,
- 776,
- 1118,
- 400,
- 514,
- 708,
- 1050,
- 674,
- 1244,
- ],
- [
- 274,
- 502,
- 502,
- 650,
- 536,
- 0,
- 228,
- 308,
- 194,
- 240,
- 582,
- 776,
- 662,
- 628,
- 514,
- 1050,
- 708,
- ],
- [
- 502,
- 730,
- 274,
- 878,
- 764,
- 228,
- 0,
- 536,
- 194,
- 468,
- 354,
- 1004,
- 890,
- 856,
- 514,
- 1278,
- 480,
- ],
- [
- 194,
- 354,
- 810,
- 502,
- 388,
- 308,
- 536,
- 0,
- 342,
- 388,
- 730,
- 468,
- 354,
- 320,
- 662,
- 742,
- 856,
- ],
- [
- 308,
- 696,
- 468,
- 844,
- 730,
- 194,
- 194,
- 342,
- 0,
- 274,
- 388,
- 810,
- 696,
- 662,
- 320,
- 1084,
- 514,
- ],
- [
- 194,
- 742,
- 742,
- 890,
- 776,
- 240,
- 468,
- 388,
- 274,
- 0,
- 342,
- 536,
- 422,
- 388,
- 274,
- 810,
- 468,
- ],
- [
- 536,
- 1084,
- 400,
- 1232,
- 1118,
- 582,
- 354,
- 730,
- 388,
- 342,
- 0,
- 878,
- 764,
- 730,
- 388,
- 1152,
- 354,
- ],
- [
- 502,
- 594,
- 1278,
- 514,
- 400,
- 776,
- 1004,
- 468,
- 810,
- 536,
- 878,
- 0,
- 114,
- 308,
- 650,
- 274,
- 844,
- ],
- [
- 388,
- 480,
- 1164,
- 628,
- 514,
- 662,
- 890,
- 354,
- 696,
- 422,
- 764,
- 114,
- 0,
- 194,
- 536,
- 388,
- 730,
- ],
- [
- 354,
- 674,
- 1130,
- 822,
- 708,
- 628,
- 856,
- 320,
- 662,
- 388,
- 730,
- 308,
- 194,
- 0,
- 342,
- 422,
- 536,
- ],
- [
- 468,
- 1016,
- 788,
- 1164,
- 1050,
- 514,
- 514,
- 662,
- 320,
- 274,
- 388,
- 650,
- 536,
- 342,
- 0,
- 764,
- 194,
- ],
- [
- 776,
- 868,
- 1552,
- 560,
- 674,
- 1050,
- 1278,
- 742,
- 1084,
- 810,
- 1152,
- 274,
- 388,
- 422,
- 764,
- 0,
- 798,
- ],
- [
- 662,
- 1210,
- 754,
- 1358,
- 1244,
- 708,
- 480,
- 856,
- 514,
- 468,
- 354,
- 844,
- 730,
- 536,
- 194,
- 798,
- 0,
- ],
- ]
- assert len(data["providers_x"]) == len(data["distance_matrix"])
- assert len(data["providers_y"]) == len(data["distance_matrix"])
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, assignment):
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- # Display dropped nodes.
- dropped_nodes = "Dropped nodes:"
- for node in range(routing.Size()):
- if routing.IsStart(node) or routing.IsEnd(node):
- continue
- if assignment.Value(routing.NextVar(node)) == node:
- dropped_nodes += f" {manager.IndexToNode(node)}"
- print(dropped_nodes)
- # Display routes
- total_distance = 0
- total_load_x = 0
- total_load_y = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- route_load_x = 0
- route_load_y = 0
- while not routing.IsEnd(index):
- node_index = manager.IndexToNode(index)
- route_load_x += data["providers_x"][node_index]
- route_load_y += data["providers_y"][node_index]
- plan_output += f" {node_index} Load(X:{route_load_x}, Y:{route_load_y}) -> "
- previous_index = index
- previous_node_index = node_index
- index = assignment.Value(routing.NextVar(index))
- node_index = manager.IndexToNode(index)
- # route_distance += routing.GetArcCostForVehicle(previous_index, index, vehicle_id)
- route_distance += data["distance_matrix"][previous_node_index][node_index]
- node_index = manager.IndexToNode(index)
- plan_output += f" {node_index} Load({route_load_x}, {route_load_y})\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- plan_output += f"Load of the route: X:{route_load_x}, Y:{route_load_y}\n"
- print(plan_output)
- total_distance += route_distance
- total_load_x += route_load_x
- total_load_y += route_load_y
- print(f"Total Distance of all routes: {total_distance}m")
- print(f"Total load of all routes: X:{total_load_x}, Y:{total_load_y}")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]),
- data["num_vehicles"],
- data["starts"],
- data["ends"],
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 2000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- # Minimize the longest route
- distance_dimension.SetGlobalSpanCostCoefficient(100)
-
- # [END distance_constraint]
-
- # Add Capacity constraint.
- # [START capacity_constraint]
- def demand_callback_x(from_index):
- """Returns the demand of the node."""
- # Convert from routing variable Index to demands NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data["providers_x"][from_node]
-
- demand_callback_x_index = routing.RegisterUnaryTransitCallback(demand_callback_x)
- routing.AddDimensionWithVehicleCapacity(
- demand_callback_x_index,
- 0, # null capacity slack
- data["vehicle_capacities_x"], # vehicle maximum capacities
- True, # start cumul to zero
- "Load_x",
- )
-
- def demand_callback_y(from_index):
- """Returns the demand of the node."""
- # Convert from routing variable Index to demands NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data["providers_y"][from_node]
-
- demand_callback_y_index = routing.RegisterUnaryTransitCallback(demand_callback_y)
- routing.AddDimensionWithVehicleCapacity(
- demand_callback_y_index,
- 0, # null capacity slack
- data["vehicle_capacities_y"], # vehicle maximum capacities
- True, # start cumul to zero
- "Load_y",
- )
- # [END capacity_constraint]
-
- # Add constraint at end
- solver = routing.solver()
- load_x_dim = routing.GetDimensionOrDie("Load_x")
- load_y_dim = routing.GetDimensionOrDie("Load_y")
- ends = []
- for v in range(manager.GetNumberOfVehicles()):
- ends.append(routing.End(v))
-
- node_end = data["ends"][0]
- solver.Add(
- solver.Sum([load_x_dim.CumulVar(l) for l in ends])
- >= -data["providers_x"][node_end]
- )
- solver.Add(
- solver.Sum([load_y_dim.CumulVar(l) for l in ends])
- >= -data["providers_y"][node_end]
- )
- # solver.Add(load_y_dim.CumulVar(end) >= -data['providers_y'][node_end])
-
- # Allow any node to be dropped at no cost.
- penalty = 0
- for node in range(0, len(data["distance_matrix"])):
- if node not in data["starts"] and node not in data["ends"]:
- routing.AddDisjunction([manager.NodeToIndex(node)], penalty)
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- # search_parameters.log_search = True
- # Set a 1 second time limit.
- search_parameters.time_limit.FromSeconds(1)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- else:
- print("no solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_node_max.py b/ortools/constraint_solver/samples/vrp_node_max.py
deleted file mode 100755
index 8bead8183e9..00000000000
--- a/ortools/constraint_solver/samples/vrp_node_max.py
+++ /dev/null
@@ -1,274 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicles Routing Problem (VRP).
-
-Each route as an associated objective cost equal to the max node value along the
-road multiply by a constant factor (4200)
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["value"] = [
- 0, # depot
- 42, # 1
- 42, # 2
- 8, # 3
- 8, # 4
- 8, # 5
- 8, # 6
- 8, # 7
- 8, # 8
- 8, # 9
- 8, # 10
- 8, # 11
- 8, # 12
- 8, # 13
- 8, # 14
- 42, # 15
- 42, # 16
- ]
- assert len(data["distance_matrix"]) == len(data["value"])
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
-
-# [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- max_route_distance = 0
- dim_one = routing.GetDimensionOrDie("One")
- dim_two = routing.GetDimensionOrDie("Two")
-
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- one_var = dim_one.CumulVar(index)
- one_slack_var = dim_one.SlackVar(index)
- two_var = dim_two.CumulVar(index)
- two_slack_var = dim_two.SlackVar(index)
- plan_output += (
- f" N:{manager.IndexToNode(index)}"
- f" one:({solution.Value(one_var)}, {solution.Value(one_slack_var)})"
- f" two:({solution.Value(two_var)}, {solution.Value(two_slack_var)})"
- " -> "
- )
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- one_var = dim_one.CumulVar(index)
- two_var = dim_two.CumulVar(index)
- plan_output += (
- f"N:{manager.IndexToNode(index)}"
- f" one:{solution.Value(one_var)}"
- f" two:{solution.Value(two_var)}\n"
- )
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- max_route_distance = max(route_distance, max_route_distance)
- print(f"Maximum of the route distances: {max_route_distance}m")
-
-# [END solution_printer]
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3_000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(10)
- # [END distance_constraint]
-
- # Max node value constraint.
- # Dimension One is used to compute the max node value seen so far along the
- # route and to store that running max in the SlackVar of each node.
- routing.AddConstantDimensionWithSlack(
- 0, # transit 0
- 42 * 16, # capacity: be able to store PEAK*ROUTE_LENGTH in worst case
- 42, # slack_max: to be able to store peak in slack
- True, # fix_start_cumul_to_zero: does not really matter here
- "One",
- )
- dim_one = routing.GetDimensionOrDie("One")
-
- # Dimension Two will be used to store the max node value in the route end node
- # CumulVar so we can use it as an objective cost.
- routing.AddConstantDimensionWithSlack(
- 0, # transit 0
- 42 * 16, # capacity: be able to have PEAK value in CumulVar(End)
- 42, # slack_max: to be able to store peak in slack
- True, # fix_start_cumul_to_zero: needed here
- "Two",
- )
- dim_two = routing.GetDimensionOrDie("Two")
-
- # Force the depot SlackVar to the depot value since the start has no predecessor.
- for v in range(manager.GetNumberOfVehicles()):
- start = routing.Start(v)
- dim_one.SlackVar(start).SetValue(data["value"][0])
- routing.AddToAssignment(dim_one.SlackVar(start))
-
- dim_two.SlackVar(start).SetValue(data["value"][0])
- routing.AddToAssignment(dim_two.SlackVar(start))
-
- # Step-by-step relation:
- # Slack(N) = max(Slack(N-1), value(N))
- solver = routing.solver()
- for node in range(1, 17):
- index = manager.NodeToIndex(node)
- routing.AddToAssignment(dim_one.SlackVar(index))
- routing.AddToAssignment(dim_two.SlackVar(index))
- test = []
- for v in range(manager.GetNumberOfVehicles()):
- previous_index = routing.Start(v)
- cond = routing.NextVar(previous_index) == index
- value = solver.Max(dim_one.SlackVar(previous_index), data["value"][node])
- test.append((cond * value).Var())
- for previous in range(1, 17):
- previous_index = manager.NodeToIndex(previous)
- cond = routing.NextVar(previous_index) == index
- value = solver.Max(dim_one.SlackVar(previous_index), data["value"][node])
- test.append((cond * value).Var())
- solver.Add(solver.Sum(test) == dim_one.SlackVar(index))
-
- # Relation between dimensions: copy the last node's Slack from dim One to dim Two.
- for node in range(1, 17):
- index = manager.NodeToIndex(node)
- values = []
- for v in range(manager.GetNumberOfVehicles()):
- next_index = routing.End(v)
- cond = routing.NextVar(index) == next_index
- value = dim_one.SlackVar(index)
- values.append((cond * value).Var())
- solver.Add(solver.Sum(values) == dim_two.SlackVar(index))
-
- # Should force all other dim_two slack vars to zero...
- for v in range(manager.GetNumberOfVehicles()):
- end = routing.End(v)
- dim_two.SetCumulVarSoftUpperBound(end, 0, 4200)
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- # search_parameters.log_search = True
- search_parameters.time_limit.FromSeconds(5)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
- # [END program]
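Note on the objective in vrp_node_max.py above: a quick worked example, assuming the two-dimension construction behaves as its comments describe, i.e. that dimension Two's CumulVar(End) ends up equal to the max node value on the route. The soft upper bound of 0 with coefficient 4200 charges 4200 * CumulVar(End) per route, so a route whose largest node value is 42 adds 42 * 4200 = 176,400 to the objective, while a route whose peak value is 8 adds only 8 * 4200 = 33,600. On top of the arc costs and the global span term, the search is therefore pushed to group the value-42 nodes (1, 2, 15, 16) onto as few routes as possible.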
diff --git a/ortools/constraint_solver/samples/vrp_nodes_indices.py b/ortools/constraint_solver/samples/vrp_nodes_indices.py
deleted file mode 100755
index 94ccf776126..00000000000
--- a/ortools/constraint_solver/samples/vrp_nodes_indices.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# [START program]
-"""Sample to better understand Node/Index relation.
-
-This script generate few markdown tables to better understand
-the relation between nodes and indices.
-
-Things to notice:
-* Since we have two duplicates (node 5 and node 4) solver need 2 extra indices
-to have an unique index for each vehicle start/stop and locations.
-* Solver needs to "create" an index for a vehicle 1 start since solver need an
-unique start index per vehicle.
-* All end nodes are moved to the end of the index list aka [15, 16, 17, 18].
-* routing.Size() return the number of node which are not end nodes (here 15 aka
-[0-14])
-note: using the two properties above, we know that any index in
-range(routing.Size()) is not a vehicle end node.
-
-* Since end nodes are moved to the end, their respective "empty" node index are
-reused so all locations indices are "shifted"
-e.g. node 9 is mapped to index 6
-* Same for start nodes which are moved to "empty" space
-e.g. start node 7 mapped to index 4
-
-Takeaway:
-* Allways use routing.Start(), routing.End(), manager.IndexToNode() or
-manager.NodeToIndex().
-* Location node is not necessarily equal to its index.
-* To loop through ALL indices use manager.GetNumberOfIndices() (Python) or
-manager::num_indices() (C++)
-"""
-
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-
-def main():
- """Entry point of the program."""
- locations = 17
- starts = [5, 5, 7, 8]
- ends = [1, 2, 4, 4]
- vehicles = len(starts)
- assert len(starts) == len(ends)
-
- manager = pywrapcp.RoutingIndexManager(locations, vehicles, starts, ends)
- routing = pywrapcp.RoutingModel(manager)
-
- print("Starts/Ends:")
- header = "| |"
- separator = "|---|"
- v_starts = "| start |"
- v_ends = "| end |"
- for v in range(manager.GetNumberOfVehicles()):
- header += f" vehicle {v} |"
- separator += "---|"
- v_starts += f" {starts[v]} |"
- v_ends += f" {ends[v]} |"
- print(header)
- print(separator)
- print(v_starts)
- print(v_ends)
-
- print("\nNodes:")
- print(
- "| locations | manager.GetNumberOfNodes | manager.GetNumberOfIndices |"
- " routing.nodes | routing.Size |"
- )
- print("|---|---|---|---|---|")
- print(
- f"| {locations} | {manager.GetNumberOfNodes()} |"
- f" {manager.GetNumberOfIndices()} | {routing.nodes()} |"
- f" {routing.Size()} |"
- )
-
- print("\nLocations:")
- print("| node | index | routing.IsStart | routing.IsEnd |")
- print("|---|---|---|---|")
- for node in range(manager.GetNumberOfNodes()):
- if node in starts or node in ends:
- continue
- index = manager.NodeToIndex(node)
- print(
- f"| {node} | {index} | {routing.IsStart(index)} |"
- f" {routing.IsEnd(index)} |"
- )
-
- print("\nStart/End:")
- print("| vehicle | Start/end | node | index | routing.IsStart | routing.IsEnd |")
- print("|---|---|---|---|---|---|")
- for v in range(manager.GetNumberOfVehicles()):
- start_index = routing.Start(v)
- start_node = manager.IndexToNode(start_index)
- print(
- f"| {v} | start | {start_node} | {start_index} |"
- f" {routing.IsStart(start_index)} | {routing.IsEnd(start_index)} |"
- )
- for v in range(manager.GetNumberOfVehicles()):
- end_index = routing.End(v)
- end_node = manager.IndexToNode(end_index)
- print(
- f"| {v} | end | {end_node} | {end_index} |"
- f" {routing.IsStart(end_index)} | {routing.IsEnd(end_index)} |"
- )
-
-
-if __name__ == "__main__":
- main()
-# [END program]
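A minimal sketch of the takeaways above, reusing the manager and routing objects built in this script and only the pywrapcp calls it already relies on (dump_indices is a hypothetical helper, not part of the sample):

    def dump_indices(manager, routing):
        """Prints every index with its node and its role, never assuming node == index."""
        for index in range(manager.GetNumberOfIndices()):
            node = manager.IndexToNode(index)
            if routing.IsStart(index):
                role = "start"
            elif routing.IsEnd(index):
                role = "end"
            else:
                role = "location"
            print(f"index {index} -> node {node} ({role})")

Calling dump_indices(manager, routing) right after the model is created reproduces the "Locations" and "Start/End" tables in a single pass.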
diff --git a/ortools/constraint_solver/samples/vrp_pickup_delivery.py b/ortools/constraint_solver/samples/vrp_pickup_delivery.py
deleted file mode 100755
index e5b7912d36f..00000000000
--- a/ortools/constraint_solver/samples/vrp_pickup_delivery.py
+++ /dev/null
@@ -1,178 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Pickup Delivery Problem (PDP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START pickups_deliveries]
- data["pickups_deliveries"] = [
- [1, 6],
- [2, 10],
- [4, 3],
- [5, 9],
- [7, 8],
- [15, 11],
- [13, 12],
- [16, 14],
- ]
- # [END pickups_deliveries]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- total_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- total_distance += route_distance
- print(f"Total Distance of all routes: {total_distance}m")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Define cost of each arc.
- # [START arc_cost]
- def distance_callback(from_index, to_index):
- """Returns the manhattan distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Define Transportation Requests.
- # [START pickup_delivery_constraint]
- for request in data["pickups_deliveries"]:
- pickup_index = manager.NodeToIndex(request[0])
- delivery_index = manager.NodeToIndex(request[1])
- routing.AddPickupAndDelivery(pickup_index, delivery_index)
- routing.solver().Add(
- routing.VehicleVar(pickup_index) == routing.VehicleVar(delivery_index)
- )
- routing.solver().Add(
- distance_dimension.CumulVar(pickup_index)
- <= distance_dimension.CumulVar(delivery_index)
- )
- # [END pickup_delivery_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_pickup_delivery_fifo.py b/ortools/constraint_solver/samples/vrp_pickup_delivery_fifo.py
deleted file mode 100755
index 83641a21e95..00000000000
--- a/ortools/constraint_solver/samples/vrp_pickup_delivery_fifo.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Pickup Delivery Problem (PDP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START pickups_deliveries]
- data["pickups_deliveries"] = [
- [1, 6],
- [2, 10],
- [4, 3],
- [5, 9],
- [7, 8],
- [15, 11],
- [13, 12],
- [16, 14],
- ]
- # [END pickups_deliveries]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, assignment):
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- total_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- total_distance += route_distance
- print(f"Total Distance of all routes: {total_distance}m")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Define cost of each arc.
- # [START arc_cost]
- def distance_callback(from_index, to_index):
- """Returns the manhattan distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Define Transportation Requests.
- # [START pickup_delivery_constraint]
- for request in data["pickups_deliveries"]:
- pickup_index = manager.NodeToIndex(request[0])
- delivery_index = manager.NodeToIndex(request[1])
- routing.AddPickupAndDelivery(pickup_index, delivery_index)
- routing.solver().Add(
- routing.VehicleVar(pickup_index) == routing.VehicleVar(delivery_index)
- )
- routing.solver().Add(
- distance_dimension.CumulVar(pickup_index)
- <= distance_dimension.CumulVar(delivery_index)
- )
- routing.SetPickupAndDeliveryPolicyOfAllVehicles(
- pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_FIFO
- )
- # [END pickup_delivery_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(data, manager, routing, assignment)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrp_pickup_delivery_lifo.py b/ortools/constraint_solver/samples/vrp_pickup_delivery_lifo.py
deleted file mode 100755
index 3f9c144cf4a..00000000000
--- a/ortools/constraint_solver/samples/vrp_pickup_delivery_lifo.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Pickup Delivery Problem (PDP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- # [START pickups_deliveries]
- data["pickups_deliveries"] = [
- [1, 6],
- [2, 10],
- [4, 3],
- [5, 9],
- [7, 8],
- [15, 11],
- [13, 12],
- [16, 14],
- ]
- # [END pickups_deliveries]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, assignment):
- """Prints assignment on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- total_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = assignment.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- total_distance += route_distance
- print(f"Total Distance of all routes: {total_distance}m")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Define cost of each arc.
- # [START arc_cost]
- def distance_callback(from_index, to_index):
- """Returns the manhattan distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Define Transportation Requests.
- # [START pickup_delivery_constraint]
- for request in data["pickups_deliveries"]:
- pickup_index = manager.NodeToIndex(request[0])
- delivery_index = manager.NodeToIndex(request[1])
- routing.AddPickupAndDelivery(pickup_index, delivery_index)
- routing.solver().Add(
- routing.VehicleVar(pickup_index) == routing.VehicleVar(delivery_index)
- )
- routing.solver().Add(
- distance_dimension.CumulVar(pickup_index)
- <= distance_dimension.CumulVar(delivery_index)
- )
- routing.SetPickupAndDeliveryPolicyOfAllVehicles(
- pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_LIFO
- )
- # [END pickup_delivery_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PARALLEL_CHEAPEST_INSERTION
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(data, manager, routing, assignment)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
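The three pickup-and-delivery samples above (vrp_pickup_delivery.py, vrp_pickup_delivery_fifo.py, vrp_pickup_delivery_lifo.py) differ only in this one policy call; a minimal sketch of the variants, assuming the PickupAndDeliveryPolicy constants exposed by pywrapcp (pick exactly one):

    # Default: no ordering imposed between the pairs carried by a vehicle.
    routing.SetPickupAndDeliveryPolicyOfAllVehicles(
        pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_NO_ORDER)
    # FIFO: deliveries are performed in the same order as their pickups.
    routing.SetPickupAndDeliveryPolicyOfAllVehicles(
        pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_FIFO)
    # LIFO: the last item picked up is the first one delivered.
    routing.SetPickupAndDeliveryPolicyOfAllVehicles(
        pywrapcp.RoutingModel.PICKUP_AND_DELIVERY_LIFO)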
diff --git a/ortools/constraint_solver/samples/vrp_resources.py b/ortools/constraint_solver/samples/vrp_resources.py
deleted file mode 100755
index ba46a5ffccb..00000000000
--- a/ortools/constraint_solver/samples/vrp_resources.py
+++ /dev/null
@@ -1,233 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicles Routing Problem (VRP) with Resource Constraints."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["time_matrix"] = [
- [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],
- [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],
- [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],
- [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],
- [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],
- [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],
- [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],
- [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],
- [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],
- [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],
- [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],
- [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],
- [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],
- [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],
- [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],
- [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],
- [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],
- ]
- data["time_windows"] = [
- (0, 5), # depot
- (7, 12), # 1
- (10, 15), # 2
- (5, 14), # 3
- (5, 13), # 4
- (0, 5), # 5
- (5, 10), # 6
- (0, 10), # 7
- (5, 10), # 8
- (0, 5), # 9
- (10, 16), # 10
- (10, 15), # 11
- (0, 5), # 12
- (5, 10), # 13
- (7, 12), # 14
- (10, 15), # 15
- (5, 15), # 16
- ]
- data["num_vehicles"] = 4
- # [START resources_data]
- data["vehicle_load_time"] = 5
- data["vehicle_unload_time"] = 5
- data["depot_capacity"] = 2
- # [END resources_data]
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- time_dimension = routing.GetDimensionOrDie("Time")
- total_time = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f"{manager.IndexToNode(index)}"
- f" Time({solution.Min(time_var)}, {solution.Max(time_var)})"
- " -> "
- )
- index = solution.Value(routing.NextVar(index))
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f"{manager.IndexToNode(index)}"
- f" Time({solution.Min(time_var)},{solution.Max(time_var)})\n"
- )
- plan_output += f"Time of the route: {solution.Min(time_var)}min\n"
- print(plan_output)
- total_time += solution.Min(time_var)
- print(f"Total time of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["time_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["time_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- # [START time_windows_constraint]
- time = "Time"
- routing.AddDimension(
- transit_callback_index,
- 60, # allow waiting time
- 60, # maximum time per vehicle
- False, # Don't force start cumul to zero.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot.
- for location_idx, time_window in enumerate(data["time_windows"]):
- if location_idx == 0:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- # Add time window constraints for each vehicle start node.
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(
- data["time_windows"][0][0], data["time_windows"][0][1]
- )
- # [END time_windows_constraint]
-
- # Add resource constraints at the depot.
- # [START depot_load_time]
- solver = routing.solver()
- intervals = []
- for i in range(data["num_vehicles"]):
- # Add a load interval at each route start.
- intervals.append(
- solver.FixedDurationIntervalVar(
- time_dimension.CumulVar(routing.Start(i)),
- data["vehicle_load_time"],
- "depot_interval",
- )
- )
- # Add an unload interval at each route end.
- intervals.append(
- solver.FixedDurationIntervalVar(
- time_dimension.CumulVar(routing.End(i)),
- data["vehicle_unload_time"],
- "depot_interval",
- )
- )
- # [END depot_load_time]
-
- # [START depot_capacity]
- depot_usage = [1 for _ in range(len(intervals))]
- solver.Add(
- solver.Cumulative(intervals, depot_usage, data["depot_capacity"], "depot")
- )
- # [END depot_capacity]
-
- # Instantiate route start and end times to produce feasible times.
- # [START depot_start_end_times]
- for i in range(data["num_vehicles"]):
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.Start(i))
- )
- routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))
- # [END depot_start_end_times]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- # [END print_solution]
- else:
- print("No solution found !")
-
-
-if __name__ == "__main__":
- main()
-# [END program]
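A short note on the depot resource model above: each of the 8 intervals (one load per route start, one unload per route end) consumes 1 unit of the "depot" resource, so solver.Cumulative(...) with depot_capacity = 2 simply means at most two vehicles may be loading or unloading at the same time. With a 5-minute load time and all four vehicles required to start in the [0, 5] window, only two routes can begin loading at time 0 and the other two are pushed to a later slot; the AddVariableMinimizedByFinalizer calls then pin those start and end times to their earliest feasible values in the returned solution.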
diff --git a/ortools/constraint_solver/samples/vrp_solution_callback.py b/ortools/constraint_solver/samples/vrp_solution_callback.py
deleted file mode 100755
index a95dcb3ac3b..00000000000
--- a/ortools/constraint_solver/samples/vrp_solution_callback.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Vehicles Routing Problem (VRP).
-
-This is a sample using the routing library python wrapper to solve a VRP
-problem.
-
-The solver stops after improving its solution 15 times or after 5 seconds.
-
-Distances are in meters.
-"""
-
-# [START import]
-import weakref
-
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_callback_printer]
-def print_solution(
- routing_manager: pywrapcp.RoutingIndexManager, routing_model: pywrapcp.RoutingModel
-):
- """Prints solution on console."""
- print("################")
- print(f"Solution objective: {routing_model.CostVar().Value()}")
- total_distance = 0
- for vehicle_id in range(routing_manager.GetNumberOfVehicles()):
- index = routing_model.Start(vehicle_id)
- if routing_model.IsEnd(routing_model.NextVar(index).Value()):
- continue
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing_model.IsEnd(index):
- plan_output += f" {routing_manager.IndexToNode(index)} ->"
- previous_index = index
- index = routing_model.NextVar(index).Value()
- route_distance += routing_model.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f" {routing_manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- total_distance += route_distance
- print(f"Total Distance of all routes: {total_distance}m")
-
-# [END solution_callback_printer]
-
-
-# [START solution_callback]
-class SolutionCallback:
- """Create a solution callback."""
-
- def __init__(
- self,
- manager: pywrapcp.RoutingIndexManager,
- model: pywrapcp.RoutingModel,
- limit: int,
- ):
- # We need a weak ref on the routing model to avoid a cycle.
- self._routing_manager_ref = weakref.ref(manager)
- self._routing_model_ref = weakref.ref(model)
- self._counter = 0
- self._counter_limit = limit
- self.objectives = []
-
- def __call__(self):
- objective = int(
- self._routing_model_ref().CostVar().Value()
- ) # pytype: disable=attribute-error
- if not self.objectives or objective < self.objectives[-1]:
- self.objectives.append(objective)
- print_solution(self._routing_manager_ref(), self._routing_model_ref())
- self._counter += 1
- if self._counter > self._counter_limit:
- self._routing_model_ref().solver().FinishCurrentSearch()
-
-
-# [END solution_callback]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- routing_manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing_model = pywrapcp.RoutingModel(routing_manager)
-
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = routing_manager.IndexToNode(from_index)
- to_node = routing_manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing_model.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing_model.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing_model.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing_model.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Attach a solution callback.
- # [START attach_callback]
- solution_callback = SolutionCallback(routing_manager, routing_model, 15)
- routing_model.AddAtSolutionCallback(solution_callback)
- # [END attach_callback]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(5)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing_model.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print(f"Best objective: {solution_callback.objectives[-1]}")
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
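A minimal sketch of an alternative to the SolutionCallback class above, assuming AddAtSolutionCallback accepts any Python callable (the sample already passes it a callable object): a plain closure is enough when no stop counter is needed.

    objectives = []

    def on_solution():
        # Record the objective of the solution just found.
        objectives.append(routing_model.CostVar().Value())

    routing_model.AddAtSolutionCallback(on_solution)

The class-with-weakref version above is only needed when the callback must hold a reference back to the model (here, to call FinishCurrentSearch()) without creating a reference cycle that would keep both objects alive.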
diff --git a/ortools/constraint_solver/samples/vrp_starts_ends.py b/ortools/constraint_solver/samples/vrp_starts_ends.py
deleted file mode 100755
index 5342a78a268..00000000000
--- a/ortools/constraint_solver/samples/vrp_starts_ends.py
+++ /dev/null
@@ -1,157 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Simple Vehicles Routing Problem."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["distance_matrix"] = [
- # fmt: off
- [0, 548, 776, 696, 582, 274, 502, 194, 308, 194, 536, 502, 388, 354, 468, 776, 662],
- [548, 0, 684, 308, 194, 502, 730, 354, 696, 742, 1084, 594, 480, 674, 1016, 868, 1210],
- [776, 684, 0, 992, 878, 502, 274, 810, 468, 742, 400, 1278, 1164, 1130, 788, 1552, 754],
- [696, 308, 992, 0, 114, 650, 878, 502, 844, 890, 1232, 514, 628, 822, 1164, 560, 1358],
- [582, 194, 878, 114, 0, 536, 764, 388, 730, 776, 1118, 400, 514, 708, 1050, 674, 1244],
- [274, 502, 502, 650, 536, 0, 228, 308, 194, 240, 582, 776, 662, 628, 514, 1050, 708],
- [502, 730, 274, 878, 764, 228, 0, 536, 194, 468, 354, 1004, 890, 856, 514, 1278, 480],
- [194, 354, 810, 502, 388, 308, 536, 0, 342, 388, 730, 468, 354, 320, 662, 742, 856],
- [308, 696, 468, 844, 730, 194, 194, 342, 0, 274, 388, 810, 696, 662, 320, 1084, 514],
- [194, 742, 742, 890, 776, 240, 468, 388, 274, 0, 342, 536, 422, 388, 274, 810, 468],
- [536, 1084, 400, 1232, 1118, 582, 354, 730, 388, 342, 0, 878, 764, 730, 388, 1152, 354],
- [502, 594, 1278, 514, 400, 776, 1004, 468, 810, 536, 878, 0, 114, 308, 650, 274, 844],
- [388, 480, 1164, 628, 514, 662, 890, 354, 696, 422, 764, 114, 0, 194, 536, 388, 730],
- [354, 674, 1130, 822, 708, 628, 856, 320, 662, 388, 730, 308, 194, 0, 342, 422, 536],
- [468, 1016, 788, 1164, 1050, 514, 514, 662, 320, 274, 388, 650, 536, 342, 0, 764, 194],
- [776, 868, 1552, 560, 674, 1050, 1278, 742, 1084, 810, 1152, 274, 388, 422, 764, 0, 798],
- [662, 1210, 754, 1358, 1244, 708, 480, 856, 514, 468, 354, 844, 730, 536, 194, 798, 0],
- # fmt: on
- ]
- data["num_vehicles"] = 4
- # [START starts_ends]
- data["starts"] = [1, 2, 15, 16]
- data["ends"] = [0, 0, 0, 0]
- # [END starts_ends]
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- max_route_distance = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- max_route_distance = max(route_distance, max_route_distance)
- print(f"Maximum of the route distances: {max_route_distance}m")
- # [END solution_printer]
-
-
-def main():
- """Entry point of the program."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["distance_matrix"]), data["num_vehicles"], data["starts"], data["ends"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- # Convert from routing variable Index to distance matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["distance_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 2000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
- # [END program]
diff --git a/ortools/constraint_solver/samples/vrp_time_windows.py b/ortools/constraint_solver/samples/vrp_time_windows.py
deleted file mode 100755
index 80fac449b24..00000000000
--- a/ortools/constraint_solver/samples/vrp_time_windows.py
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicles Routing Problem (VRP) with Time Windows."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["time_matrix"] = [
- [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],
- [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],
- [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],
- [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],
- [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],
- [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],
- [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],
- [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],
- [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],
- [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],
- [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],
- [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],
- [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],
- [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],
- [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],
- [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],
- [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],
- ]
- data["time_windows"] = [
- (0, 5), # depot
- (7, 12), # 1
- (10, 15), # 2
- (16, 18), # 3
- (10, 13), # 4
- (0, 5), # 5
- (5, 10), # 6
- (0, 4), # 7
- (5, 10), # 8
- (0, 3), # 9
- (10, 16), # 10
- (10, 15), # 11
- (0, 5), # 12
- (5, 10), # 13
- (7, 8), # 14
- (10, 15), # 15
- (11, 15), # 16
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(data, manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- time_dimension = routing.GetDimensionOrDie("Time")
- total_time = 0
- for vehicle_id in range(data["num_vehicles"]):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f"{manager.IndexToNode(index)}"
- f" Time({solution.Min(time_var)},{solution.Max(time_var)})"
- " -> "
- )
- index = solution.Value(routing.NextVar(index))
- time_var = time_dimension.CumulVar(index)
- plan_output += (
- f"{manager.IndexToNode(index)}"
- f" Time({solution.Min(time_var)},{solution.Max(time_var)})\n"
- )
- plan_output += f"Time of the route: {solution.Min(time_var)}min\n"
- print(plan_output)
- total_time += solution.Min(time_var)
- print(f"Total time of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["time_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["time_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- # [START time_windows_constraint]
- time = "Time"
- routing.AddDimension(
- transit_callback_index,
- 30, # allow waiting time
- 30, # maximum time per vehicle
- False, # Don't force start cumul to zero.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot.
- for location_idx, time_window in enumerate(data["time_windows"]):
- if location_idx == data["depot"]:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- # Add time window constraints for each vehicle start node.
- depot_idx = data["depot"]
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(
- data["time_windows"][depot_idx][0], data["time_windows"][depot_idx][1]
- )
- # [END time_windows_constraint]
-
- # Instantiate route start and end times to produce feasible times.
- # [START depot_start_end_times]
- for i in range(data["num_vehicles"]):
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.Start(i))
- )
- routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))
- # [END depot_start_end_times]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(data, manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
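The finalizer loop above matters: AddVariableMinimizedByFinalizer asks the solver, once the routes are fixed, to drive the start and end cumuls of the Time dimension to their smallest feasible values, which turns loose time-window ranges into concrete schedules. A small sketch of reading the resulting bounds for one location, assuming a solved model set up exactly as in this sample:

    time_dimension = routing.GetDimensionOrDie("Time")
    index = manager.NodeToIndex(5)  # routing index of location 5
    cumul = time_dimension.CumulVar(index)
    # Earliest and latest feasible arrival times at location 5 for the
    # routes chosen in `solution`.
    print(solution.Min(cumul), solution.Max(cumul))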
diff --git a/ortools/constraint_solver/samples/vrp_time_windows_per_vehicles.py b/ortools/constraint_solver/samples/vrp_time_windows_per_vehicles.py
deleted file mode 100755
index c814af54ccd..00000000000
--- a/ortools/constraint_solver/samples/vrp_time_windows_per_vehicles.py
+++ /dev/null
@@ -1,258 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# [START program]
-"""Vehicles Routing Problem (VRP) with Time Window (TW) per vehicle.
-
-All times are in minutes, using midnight (0am) as the origin,
-e.g. 8am = 480, 11am = 660, 1pm = 780 ...
-
-We have 1 depot (0) and 16 locations (1-16).
-We have a fleet of 4 vehicles (0-3) whose working time is [480, 1020] (8am-5pm)
-We have the travel time matrix between these locations and the depot.
-We have a service time of 25 min at each location.
-
-Locations are duplicated so we can simulate a TW per vehicle.
-location: [01-16] vehicle: 0 TW: [540, 660] (9am-11am)
-location: [17-32] vehicle: 1 TW: [660, 780] (11am-1pm)
-location: [33-48] vehicle: 2 TW: [780, 900] (1pm-3pm)
-location: [49-64] vehicle: 3 TW: [900, 1020] (3pm-5pm)
-"""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["time_matrix"] = [
- [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],
- [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],
- [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],
- [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],
- [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],
- [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],
- [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],
- [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],
- [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],
- [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],
- [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],
- [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],
- [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],
- [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],
- [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],
- [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],
- [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
- # [END data_model]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, assignment):
- """Prints solution on console."""
- print(f"Objective: {assignment.ObjectiveValue()}")
- # Display dropped nodes.
- dropped_nodes = "Dropped nodes:"
- for index in range(routing.Size()):
- if routing.IsStart(index) or routing.IsEnd(index):
- continue
- if assignment.Value(routing.NextVar(index)) == index:
- node = manager.IndexToNode(index)
- if node > 16:
- original = node
- while original > 16:
- original = original - 16
- dropped_nodes += f" {node}({original})"
- else:
- dropped_nodes += f" {node}"
- print(dropped_nodes)
- # Display routes
- time_dimension = routing.GetDimensionOrDie("Time")
- total_time = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(assignment, vehicle_id):
- continue
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- index = routing.Start(vehicle_id)
- start_time = 0
- while not routing.IsEnd(index):
- time_var = time_dimension.CumulVar(index)
- node = manager.IndexToNode(index)
- if node > 16:
- original = node
- while original > 16:
- original = original - 16
- plan_output += f"{node}({original})"
- else:
- plan_output += f"{node}"
- plan_output += f" Time:{assignment.Value(time_var)} -> "
- if start_time == 0:
- start_time = assignment.Value(time_var)
- index = assignment.Value(routing.NextVar(index))
- time_var = time_dimension.CumulVar(index)
- node = manager.IndexToNode(index)
- plan_output += f"{node} Time:{assignment.Value(time_var)}\n"
- end_time = assignment.Value(time_var)
- duration = end_time - start_time
- plan_output += f"Duration of the route:{duration}min\n"
- print(plan_output)
- total_time += duration
- print(f"Total duration of all routes: {total_time}min")
- # [END solution_printer]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- 1 + 16 * 4, data["num_vehicles"], data["depot"] # number of locations
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- # Since our matrix is 17x17, map each duplicated node back to the original
- # one to retrieve the travel time.
- while from_node > 16:
- from_node = from_node - 16
- while to_node > 16:
- to_node = to_node - 16
- # Add a service time of 25 min for each location (except the depot).
- service_time = 0
- if from_node != data["depot"]:
- service_time = 25
- return data["time_matrix"][from_node][to_node] + service_time
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- # [START time_windows_constraint]
- time = "Time"
- routing.AddDimension(
- transit_callback_index,
- 0, # allow waiting time (0 min)
- 1020, # maximum time per vehicle (9 hours)
- False, # Don't force start cumul to zero.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot.
- for location_idx in range(17):
- if location_idx == data["depot"]:
- continue
- # Vehicle 0 location TW: [9am, 11am]
- index_0 = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index_0).SetRange(540, 660)
- routing.VehicleVar(index_0).SetValues([-1, 0])
-
- # Vehicle 1 location TW: [11am, 1pm]
- index_1 = manager.NodeToIndex(location_idx + 16 * 1)
- time_dimension.CumulVar(index_1).SetRange(660, 780)
- routing.VehicleVar(index_1).SetValues([-1, 1])
-
- # Vehicle 2 location TW: [1pm, 3pm]
- index_2 = manager.NodeToIndex(location_idx + 16 * 2)
- time_dimension.CumulVar(index_2).SetRange(780, 900)
- routing.VehicleVar(index_2).SetValues([-1, 2])
-
- # Vehicle 3 location TW: [3pm, 5pm]
- index_3 = manager.NodeToIndex(location_idx + 16 * 3)
- time_dimension.CumulVar(index_3).SetRange(900, 1020)
- routing.VehicleVar(index_3).SetValues([-1, 3])
-
- # Add a disjunction so that only one node among the duplicates is visited.
- penalty = 100_000 # Give solver strong incentive to visit one node
- routing.AddDisjunction([index_0, index_1, index_2, index_3], penalty, 1)
-
- # Add time window constraints for each vehicle start node.
- depot_idx = data["depot"]
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(480, 1020) # (8am, 5pm)
-
- # Add time window constraints for each vehicle end node.
- depot_idx = data["depot"]
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.End(vehicle_id)
- time_dimension.CumulVar(index).SetRange(480, 1020) # (8am, 5pm)
- # [END time_windows_constraint]
-
- # Instantiate route start and end times to produce feasible times.
- # [START depot_start_end_times]
- for i in range(data["num_vehicles"]):
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.Start(i))
- )
- routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))
- # [END depot_start_end_times]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(1)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- assignment = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if assignment:
- print_solution(manager, routing, assignment)
- else:
- print("no solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
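The index arithmetic used throughout this sample follows directly from the duplication scheme in the docstring: each of the 16 locations gets one copy per vehicle, offset by 16. A small illustrative helper for that mapping (not part of the sample itself):

    NUM_ORIGINAL = 16  # locations 1..16; node 0 is the depot

    def original_location(node):
        """Maps a duplicated node (1..64) back to its original location (1..16)."""
        while node > NUM_ORIGINAL:
            node -= NUM_ORIGINAL
        return node

    def vehicle_of_duplicate(node):
        """Returns which vehicle's copy a node is (0..3); None for the depot."""
        return None if node == 0 else (node - 1) // NUM_ORIGINAL

    # Example: node 35 -> location 3, copy owned by vehicle 2 (1pm-3pm window).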
diff --git a/ortools/constraint_solver/samples/vrp_tokens.py b/ortools/constraint_solver/samples/vrp_tokens.py
deleted file mode 100755
index e5ede08333f..00000000000
--- a/ortools/constraint_solver/samples/vrp_tokens.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""Simple VRP with special locations which need to be visited at end of the route."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- # Special locations don't consume a token, while regular ones consume one.
- data["tokens"] = [
- 0, # 0 depot
- 0, # 1 special node
- 0, # 2 special node
- 0, # 3 special node
- 0, # 4 special node
- 0, # 5 special node
- -1, # 6
- -1, # 7
- -1, # 8
- -1, # 9
- -1, # 10
- -1, # 11
- -1, # 12
- -1, # 13
- -1, # 14
- -1, # 15
- -1, # 16
- -1, # 17
- -1, # 18
- ]
- # Just needs to be big enough; not a limiting factor.
- data["vehicle_tokens"] = [20, 20, 20, 20]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
-
-
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- token_dimension = routing.GetDimensionOrDie("Token")
- total_distance = 0
- total_token = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- index = routing.Start(vehicle_id)
- total_token += solution.Value(token_dimension.CumulVar(index))
- route_distance = 0
- route_token = 0
- while not routing.IsEnd(index):
- node_index = manager.IndexToNode(index)
- token_var = token_dimension.CumulVar(index)
- route_token = solution.Value(token_var)
- plan_output += f" {node_index} Token({route_token}) -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- node_index = manager.IndexToNode(index)
- token_var = token_dimension.CumulVar(index)
- route_token = solution.Value(token_var)
- plan_output += f" {node_index} Token({route_token})\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- total_distance += route_distance
- print(plan_output)
- print(f"Total distance of all routes: {total_distance}m")
- print(f"Total token of all routes: {total_token}")
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- data = create_data_model()
-
- # Create the routing index manager.
- manager = pywrapcp.RoutingIndexManager(
- len(data["tokens"]), data["num_vehicles"], data["depot"]
- )
-
- # Create Routing Model.
- routing = pywrapcp.RoutingModel(manager)
-
- # Create and register a transit callback.
- def distance_callback(from_index, to_index):
- """Returns the distance between the two nodes."""
- del from_index
- del to_index
- return 10
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
-
- routing.AddDimension(
- transit_callback_index,
- 0, # null slack
- 3000, # maximum distance per vehicle
- True, # start cumul to zero
- "distance",
- )
- distance_dimension = routing.GetDimensionOrDie("distance")
- distance_dimension.SetGlobalSpanCostCoefficient(100)
-
- # Define cost of each arc.
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
-
- # Add Token constraint.
- def token_callback(from_index):
- """Returns the number of token consumed by the node."""
- # Convert from routing variable Index to tokens NodeIndex.
- from_node = manager.IndexToNode(from_index)
- return data["tokens"][from_node]
-
- token_callback_index = routing.RegisterUnaryTransitCallback(token_callback)
- routing.AddDimensionWithVehicleCapacity(
- token_callback_index,
- 0, # null capacity slack
- data["vehicle_tokens"], # vehicle maximum tokens
- False, # start cumul to zero
- "Token",
- )
- # Add constraint: a special node can only be visited when the remaining token count is zero.
- token_dimension = routing.GetDimensionOrDie("Token")
- for node in range(1, 6):
- index = manager.NodeToIndex(node)
- routing.solver().Add(token_dimension.CumulVar(index) == 0)
-
- # Instantiate route start and end times to produce feasible times.
- # [START depot_start_end_times]
- for i in range(manager.GetNumberOfVehicles()):
- routing.AddVariableMinimizedByFinalizer(
- token_dimension.CumulVar(routing.Start(i))
- )
- routing.AddVariableMinimizedByFinalizer(
- token_dimension.CumulVar(routing.End(i))
- )
- # [END depot_start_end_times]
-
- # Setting first solution heuristic.
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.time_limit.FromSeconds(1)
-
- # Solve the problem.
- solution = routing.SolveWithParameters(search_parameters)
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- else:
- print("No solution found !")
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
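The reason the special locations end up last on each route is the combination of the -1 transits, the non-negative cumuls, and the CumulVar == 0 constraint. A worked trace of one route under these assumptions (stop names and the start value are illustrative):

    # One route: three regular stops (A, B, C) then two special stops (D, E).
    # Regular stops have a transit of -1, special stops a transit of 0, and
    # every special stop is constrained to a cumul of exactly 0.
    route_transits = [-1, -1, -1, 0, 0]
    cumuls = [3]  # cumul at stop A; the solver picks the smallest feasible start
    for transit in route_transits:
        cumuls.append(cumuls[-1] + transit)
    print(cumuls)  # [3, 2, 1, 0, 0, 0]: never negative, special stops sit at 0
    # Placing a special stop before C would pin its cumul to 0 while a -1
    # transit still remains, which would drive a later cumul negative and is
    # therefore infeasible -- that is what pushes special stops to the end.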
diff --git a/ortools/constraint_solver/samples/vrp_with_time_limit.py b/ortools/constraint_solver/samples/vrp_with_time_limit.py
deleted file mode 100755
index fb286ed2756..00000000000
--- a/ortools/constraint_solver/samples/vrp_with_time_limit.py
+++ /dev/null
@@ -1,127 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""Vehicles Routing Problem (VRP)."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START solution_printer]
-def print_solution(manager, routing, solution):
- """Prints solution on console."""
- print(f"Objective: {solution.ObjectiveValue()}")
- max_route_distance = 0
- for vehicle_id in range(manager.GetNumberOfVehicles()):
- if not routing.IsVehicleUsed(solution, vehicle_id):
- continue
- index = routing.Start(vehicle_id)
- plan_output = f"Route for vehicle {vehicle_id}:\n"
- route_distance = 0
- while not routing.IsEnd(index):
- plan_output += f" {manager.IndexToNode(index)} -> "
- previous_index = index
- index = solution.Value(routing.NextVar(index))
- route_distance += routing.GetArcCostForVehicle(
- previous_index, index, vehicle_id
- )
- plan_output += f"{manager.IndexToNode(index)}\n"
- plan_output += f"Distance of the route: {route_distance}m\n"
- print(plan_output)
- max_route_distance = max(route_distance, max_route_distance)
- print(f"Maximum of the route distances: {max_route_distance}m")
- # [END solution_printer]
-
-
-def main():
- """Solve the CVRP problem."""
- # Instantiate the data problem.
- # [START data]
- num_locations = 20
- num_vehicles = 5
- depot = 0
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(num_locations, num_vehicles, depot)
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
-
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def distance_callback(from_index, to_index):
- # pylint: disable=unused-argument
- """Returns the distance between the two nodes."""
- return 1
-
- transit_callback_index = routing.RegisterTransitCallback(distance_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Distance constraint.
- # [START distance_constraint]
- dimension_name = "Distance"
- routing.AddDimension(
- transit_callback_index,
- 0, # no slack
- 3000, # vehicle maximum travel distance
- True, # start cumul to zero
- dimension_name,
- )
- distance_dimension = routing.GetDimensionOrDie(dimension_name)
- distance_dimension.SetGlobalSpanCostCoefficient(100)
- # [END distance_constraint]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- search_parameters.local_search_metaheuristic = (
- routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
- )
- search_parameters.log_search = True
- search_parameters.time_limit.FromSeconds(5)
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution on console.
- # [START print_solution]
- if solution:
- print_solution(manager, routing, solution)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program]
diff --git a/ortools/constraint_solver/samples/vrptw_store_solution_data.py b/ortools/constraint_solver/samples/vrptw_store_solution_data.py
deleted file mode 100755
index 6abc76ce837..00000000000
--- a/ortools/constraint_solver/samples/vrptw_store_solution_data.py
+++ /dev/null
@@ -1,253 +0,0 @@
-#!/usr/bin/env python3
-# Copyright 2010-2025 Google LLC
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# [START program]
-"""VRPTW example that stores routes and cumulative data in an array."""
-
-# [START import]
-from ortools.constraint_solver import routing_enums_pb2
-from ortools.constraint_solver import pywrapcp
-
-# [END import]
-
-
-# [START program_part1]
-# [START data_model]
-def create_data_model():
- """Stores the data for the problem."""
- data = {}
- data["time_matrix"] = [
- [0, 6, 9, 8, 7, 3, 6, 2, 3, 2, 6, 6, 4, 4, 5, 9, 7],
- [6, 0, 8, 3, 2, 6, 8, 4, 8, 8, 13, 7, 5, 8, 12, 10, 14],
- [9, 8, 0, 11, 10, 6, 3, 9, 5, 8, 4, 15, 14, 13, 9, 18, 9],
- [8, 3, 11, 0, 1, 7, 10, 6, 10, 10, 14, 6, 7, 9, 14, 6, 16],
- [7, 2, 10, 1, 0, 6, 9, 4, 8, 9, 13, 4, 6, 8, 12, 8, 14],
- [3, 6, 6, 7, 6, 0, 2, 3, 2, 2, 7, 9, 7, 7, 6, 12, 8],
- [6, 8, 3, 10, 9, 2, 0, 6, 2, 5, 4, 12, 10, 10, 6, 15, 5],
- [2, 4, 9, 6, 4, 3, 6, 0, 4, 4, 8, 5, 4, 3, 7, 8, 10],
- [3, 8, 5, 10, 8, 2, 2, 4, 0, 3, 4, 9, 8, 7, 3, 13, 6],
- [2, 8, 8, 10, 9, 2, 5, 4, 3, 0, 4, 6, 5, 4, 3, 9, 5],
- [6, 13, 4, 14, 13, 7, 4, 8, 4, 4, 0, 10, 9, 8, 4, 13, 4],
- [6, 7, 15, 6, 4, 9, 12, 5, 9, 6, 10, 0, 1, 3, 7, 3, 10],
- [4, 5, 14, 7, 6, 7, 10, 4, 8, 5, 9, 1, 0, 2, 6, 4, 8],
- [4, 8, 13, 9, 8, 7, 10, 3, 7, 4, 8, 3, 2, 0, 4, 5, 6],
- [5, 12, 9, 14, 12, 6, 6, 7, 3, 3, 4, 7, 6, 4, 0, 9, 2],
- [9, 10, 18, 6, 8, 12, 15, 8, 13, 9, 13, 3, 4, 5, 9, 0, 9],
- [7, 14, 9, 16, 14, 8, 5, 10, 6, 5, 4, 10, 8, 6, 2, 9, 0],
- ]
- data["time_windows"] = [
- (0, 5), # depot
- (7, 12), # 1
- (10, 15), # 2
- (16, 18), # 3
- (10, 13), # 4
- (0, 5), # 5
- (5, 10), # 6
- (0, 4), # 7
- (5, 10), # 8
- (0, 3), # 9
- (10, 16), # 10
- (10, 15), # 11
- (0, 5), # 12
- (5, 10), # 13
- (7, 8), # 14
- (10, 15), # 15
- (11, 15), # 16
- ]
- data["num_vehicles"] = 4
- data["depot"] = 0
- return data
-
-# [END data_model]
-
-
-# [START solution_printer]
-def print_solution(routes, cumul_data):
- """Print the solution."""
- total_time = 0
- route_str = ""
- for i, route in enumerate(routes):
- if len(route) <= 2:
- continue
- route_str += "Route " + str(i) + ":\n"
- start_time = cumul_data[i][0][0]
- end_time = cumul_data[i][0][1]
- route_str += (
- " "
- + str(route[0])
- + " Time("
- + str(start_time)
- + ", "
- + str(end_time)
- + ")"
- )
- for j in range(1, len(route)):
- start_time = cumul_data[i][j][0]
- end_time = cumul_data[i][j][1]
- route_str += (
- " -> "
- + str(route[j])
- + " Time("
- + str(start_time)
- + ", "
- + str(end_time)
- + ")"
- )
- route_str += f"\n Route time: {start_time}min\n\n"
- total_time += cumul_data[i][len(route) - 1][0]
- route_str += f"Total time: {total_time}min"
- print(route_str)
-
-# [END solution_printer]
-
-
-# [START get_routes]
-def get_routes(solution, routing, manager):
- """Get vehicle routes from a solution and store them in an array."""
- # Get vehicle routes and store them in a two dimensional array whose
- # i,j entry is the jth location visited by vehicle i along its route.
- routes = []
- for route_nbr in range(routing.vehicles()):
- index = routing.Start(route_nbr)
- route = [manager.IndexToNode(index)]
- while not routing.IsEnd(index):
- index = solution.Value(routing.NextVar(index))
- route.append(manager.IndexToNode(index))
- routes.append(route)
- return routes
-
-# [END get_routes]
-
-
-# [START get_cumulative_data]
-def get_cumul_data(solution, routing, dimension):
- """Get cumulative data from a dimension and store it in an array."""
- # Returns an array cumul_data whose i,j entry contains the minimum and
- # maximum of CumulVar for the dimension at the jth node on route :
- # - cumul_data[i][j][0] is the minimum.
- # - cumul_data[i][j][1] is the maximum.
-
- cumul_data = []
- for route_nbr in range(routing.vehicles()):
- route_data = []
- index = routing.Start(route_nbr)
- dim_var = dimension.CumulVar(index)
- route_data.append([solution.Min(dim_var), solution.Max(dim_var)])
- while not routing.IsEnd(index):
- index = solution.Value(routing.NextVar(index))
- dim_var = dimension.CumulVar(index)
- route_data.append([solution.Min(dim_var), solution.Max(dim_var)])
- cumul_data.append(route_data)
- return cumul_data
-
-# [END get_cumulative_data]
-
-
-def main():
- """Solve the VRP with time windows."""
- # Instantiate the data problem.
- # [START data]
- data = create_data_model()
- # [END data]
-
- # Create the routing index manager.
- # [START index_manager]
- manager = pywrapcp.RoutingIndexManager(
- len(data["time_matrix"]), data["num_vehicles"], data["depot"]
- )
- # [END index_manager]
-
- # Create Routing Model.
- # [START routing_model]
- routing = pywrapcp.RoutingModel(manager)
- # [END routing_model]
-
- # Create and register a transit callback.
- # [START transit_callback]
- def time_callback(from_index, to_index):
- """Returns the travel time between the two nodes."""
- # Convert from routing variable Index to time matrix NodeIndex.
- from_node = manager.IndexToNode(from_index)
- to_node = manager.IndexToNode(to_index)
- return data["time_matrix"][from_node][to_node]
-
- transit_callback_index = routing.RegisterTransitCallback(time_callback)
- # [END transit_callback]
-
- # Define cost of each arc.
- # [START arc_cost]
- routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
- # [END arc_cost]
-
- # Add Time Windows constraint.
- # [START time_windows_constraint]
- time = "Time"
-
- routing.AddDimension(
- transit_callback_index,
- 30, # allow waiting time
- 30, # maximum time per vehicle
- False, # Don't force cumulative time to be 0 at start of routes.
- time,
- )
- time_dimension = routing.GetDimensionOrDie(time)
- # Add time window constraints for each location except depot.
- for location_idx, time_window in enumerate(data["time_windows"]):
- if location_idx == 0:
- continue
- index = manager.NodeToIndex(location_idx)
- time_dimension.CumulVar(index).SetRange(time_window[0], time_window[1])
- # Add time window constraints for each vehicle start node.
- for vehicle_id in range(data["num_vehicles"]):
- index = routing.Start(vehicle_id)
- time_dimension.CumulVar(index).SetRange(
- data["time_windows"][0][0], data["time_windows"][0][1]
- )
- # [END time_windows_constraint]
-
- # Instantiate route start and end times to produce feasible times.
- # [START depot_start_end_times]
- for i in range(data["num_vehicles"]):
- routing.AddVariableMinimizedByFinalizer(
- time_dimension.CumulVar(routing.Start(i))
- )
- routing.AddVariableMinimizedByFinalizer(time_dimension.CumulVar(routing.End(i)))
- # [END depot_start_end_times]
-
- # Setting first solution heuristic.
- # [START parameters]
- search_parameters = pywrapcp.DefaultRoutingSearchParameters()
- search_parameters.first_solution_strategy = (
- routing_enums_pb2.FirstSolutionStrategy.PATH_CHEAPEST_ARC
- )
- # [END parameters]
-
- # Solve the problem.
- # [START solve]
- solution = routing.SolveWithParameters(search_parameters)
- # [END solve]
-
- # Print solution.
- # [START print_solution]
- if solution:
- routes = get_routes(solution, routing, manager)
- cumul_data = get_cumul_data(solution, routing, time_dimension)
- print_solution(routes, cumul_data)
- # [END print_solution]
-
-
-if __name__ == "__main__":
- main()
-# [END program_part1]
-# [END program]
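Because get_routes and get_cumul_data return plain Python lists, the solution can be inspected after the fact without holding on to the solver objects. A short usage sketch consistent with the helpers defined above:

    routes = get_routes(solution, routing, manager)
    cumul_data = get_cumul_data(solution, routing, time_dimension)
    for vehicle, route in enumerate(routes):
        if len(route) <= 2:  # start and end only: the vehicle is unused
            continue
        end_time = cumul_data[vehicle][len(route) - 1][0]  # min cumul at last node
        print(f"Vehicle {vehicle}: {len(route) - 2} stops, ends at t={end_time}min")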
diff --git a/ortools/constraint_solver/search.cc b/ortools/constraint_solver/search.cc
index 1955af19380..b3014ebf4e7 100644
--- a/ortools/constraint_solver/search.cc
+++ b/ortools/constraint_solver/search.cc
@@ -3013,8 +3013,8 @@ class RoundRobinCompoundObjectiveMonitor : public BaseObjectiveMonitor {
bool AcceptSolution() override {
return monitors_[active_monitor_]->AcceptSolution();
}
- bool LocalOptimum() override {
- const bool ok = monitors_[active_monitor_]->LocalOptimum();
+ bool AtLocalOptimum() override {
+ const bool ok = monitors_[active_monitor_]->AtLocalOptimum();
if (!ok) {
enabled_monitors_[active_monitor_] = false;
}
@@ -3388,9 +3388,17 @@ class TabuSearch : public Metaheuristic {
void EnterSearch() override;
void ApplyDecision(Decision* d) override;
bool AtSolution() override;
- bool LocalOptimum() override;
+ bool AcceptSolution() override;
+ bool AtLocalOptimum() override;
bool AcceptDelta(Assignment* delta, Assignment* deltadelta) override;
void AcceptNeighbor() override;
+ void BeginNextDecision(DecisionBuilder* const) override {
+ if (stop_search_) solver()->Fail();
+ }
+ void RefuteDecision(Decision* const d) override {
+ Metaheuristic::RefuteDecision(d);
+ if (stop_search_) solver()->Fail();
+ }
std::string DebugString() const override { return "Tabu Search"; }
protected:
@@ -3425,6 +3433,11 @@ class TabuSearch : public Metaheuristic {
int64_t forbid_tenure_;
double tabu_factor_;
int64_t stamp_;
+ int64_t solution_count_ = 0;
+ bool stop_search_ = false;
+ std::vector<int64_t> delta_values_;
+ SparseBitset<> delta_vars_;
+ std::vector<int> var_index_to_index_;
};
TabuSearch::TabuSearch(Solver* solver, const std::vector<bool>& maximize,
@@ -3438,10 +3451,17 @@ TabuSearch::TabuSearch(Solver* solver, const std::vector& maximize,
keep_tenure_(keep_tenure),
forbid_tenure_(forbid_tenure),
tabu_factor_(tabu_factor),
- stamp_(0) {
+ stamp_(0),
+ delta_values_(vars.size(), 0),
+ delta_vars_(vars.size()) {
for (int index = 0; index < vars_.size(); ++index) {
assignment_container_.FastAdd(vars_[index]);
DCHECK_EQ(vars_[index], assignment_container_.Element(index).Var());
+ const int var_index = vars_[index]->index();
+ if (var_index >= var_index_to_index_.size()) {
+ var_index_to_index_.resize(var_index + 1, -1);
+ }
+ var_index_to_index_[var_index] = index;
}
}
@@ -3450,6 +3470,8 @@ void TabuSearch::EnterSearch() {
solver()->SetUseFastLocalSearch(true);
stamp_ = 0;
has_stored_assignment_ = false;
+ solution_count_ = 0;
+ stop_search_ = false;
}
void TabuSearch::ApplyDecision(Decision* const d) {
@@ -3482,21 +3504,19 @@ void TabuSearch::ApplyDecision(Decision* const d) {
MakeMinimizationVarsLessOrEqualWithSteps(
[this](int i) { return CurrentInternalValue(i); });
}
- // Avoid cost plateau's which lead to tabu cycles.
+}
+
+bool TabuSearch::AcceptSolution() {
+ // Avoid cost plateaus which lead to tabu cycles.
if (found_initial_solution_) {
- Constraint* plateau_ct = nullptr;
- if (Size() == 1) {
- plateau_ct = s->MakeNonEquality(MinimizationVar(0), last_values_[0]);
- } else {
- std::vector<IntVar*> plateau_vars(Size());
- for (int i = 0; i < Size(); ++i) {
- plateau_vars[i] =
- s->MakeIsEqualCstVar(MinimizationVar(i), last_values_[i]);
+ for (int i = 0; i < Size(); ++i) {
+ if (last_values_[i] != MinimizationVar(i)->Min()) {
+ return true;
}
- plateau_ct = s->MakeSumLessOrEqual(plateau_vars, Size() - 1);
}
- s->AddConstraint(plateau_ct);
+ return false;
}
+ return true;
}
std::vector<IntVar*> TabuSearch::CreateTabuVars() {
@@ -3519,6 +3539,7 @@ std::vector TabuSearch::CreateTabuVars() {
}
bool TabuSearch::AtSolution() {
+ ++solution_count_;
if (!ObjectiveMonitor::AtSolution()) {
return false;
}
@@ -3549,8 +3570,15 @@ bool TabuSearch::AtSolution() {
return true;
}
-bool TabuSearch::LocalOptimum() {
+bool TabuSearch::AtLocalOptimum() {
solver()->SetUseFastLocalSearch(false);
+ // If no solution has been accepted since the last local optimum, and no tabu
+ // lists are active, stop the search.
+ if (stamp_ > 0 && solution_count_ == 0 && keep_tabu_list_.empty() &&
+ forbid_tabu_list_.empty()) {
+ stop_search_ = true;
+ }
+ solution_count_ = 0;
AgeLists();
for (int i = 0; i < Size(); ++i) {
SetCurrentInternalValue(i, std::numeric_limits::max());
@@ -3569,26 +3597,32 @@ bool TabuSearch::AcceptDelta(Assignment* delta, Assignment* deltadelta) {
for (const IntVarElement& element : delta_container.elements()) {
if (!element.Bound()) return true;
}
+ delta_vars_.ResetAllToFalse();
+ for (const IntVarElement& element : delta_container.elements()) {
+ const int var_index = element.Var()->index();
+ if (var_index >= var_index_to_index_.size()) continue;
+ const int index = var_index_to_index_[var_index];
+ if (index == -1) continue;
+ delta_values_[index] = element.Value();
+ delta_vars_.Set(index);
+ }
int num_respected = 0;
- // TODO(user): Make this O(delta).
- auto get_value = [this, &delta_container](int var_index) {
- const IntVarElement* element =
- delta_container.ElementPtrOrNull(vars(var_index));
- return (element != nullptr)
- ? element->Value()
+ auto get_value = [this](int var_index) {
+ return delta_vars_[var_index]
+ ? delta_values_[var_index]
: assignment_container_.Element(var_index).Value();
};
+ const int64_t tabu_limit = TabuLimit();
for (const auto [var_index, value, unused_stamp] : synced_keep_tabu_list_) {
if (get_value(var_index) == value) {
- ++num_respected;
+ if (++num_respected >= tabu_limit) return true;
}
}
for (const auto [var_index, value, unused_stamp] : synced_forbid_tabu_list_) {
if (get_value(var_index) != value) {
- ++num_respected;
+ if (++num_respected >= tabu_limit) return true;
}
}
- const int64_t tabu_limit = TabuLimit();
if (num_respected >= tabu_limit) return true;
// Aspiration
// TODO(user): Add proper support for lex-objectives with steps.
@@ -3697,7 +3731,7 @@ class SimulatedAnnealing : public Metaheuristic {
std::vector initial_temperatures);
~SimulatedAnnealing() override {}
void ApplyDecision(Decision* d) override;
- bool LocalOptimum() override;
+ bool AtLocalOptimum() override;
void AcceptNeighbor() override;
std::string DebugString() const override { return "Simulated Annealing"; }
@@ -3756,7 +3790,7 @@ void SimulatedAnnealing::ApplyDecision(Decision* const d) {
}
}
-bool SimulatedAnnealing::LocalOptimum() {
+bool SimulatedAnnealing::AtLocalOptimum() {
for (int i = 0; i < Size(); ++i) {
SetCurrentInternalValue(i, std::numeric_limits::max());
}
@@ -3903,7 +3937,7 @@ class GuidedLocalSearch : public Metaheuristic {
void ApplyDecision(Decision* d) override;
bool AtSolution() override;
void EnterSearch() override;
- bool LocalOptimum() override;
+ bool AtLocalOptimum() override;
virtual int64_t AssignmentElementPenalty(int index) const = 0;
virtual int64_t AssignmentPenalty(int64_t var, int64_t value) const = 0;
virtual int64_t Evaluate(const Assignment* delta, int64_t current_penalty,
@@ -4172,7 +4206,7 @@ bool GuidedLocalSearch::AcceptDelta(Assignment* delta,
// Penalize (var, value) pairs of maximum utility, with
// utility(var, value) = cost(var, value) / (1 + penalty(var, value))
template
-bool GuidedLocalSearch::LocalOptimum() {
+bool GuidedLocalSearch::AtLocalOptimum() {
solver()->SetUseFastLocalSearch(false);
std::vector utilities(num_vars_);
double max_utility = -std::numeric_limits::infinity();
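The TabuSearch::AcceptDelta rework in this file replaces a per-variable search of the assignment container with a var-index-to-position map plus a sparse record of the variables touched by the current move, so each tabu-list entry is checked in constant time. A purely illustrative Python sketch of that lookup pattern (not the C++ implementation):

    class DeltaView:
        """O(1) lookup of a variable's tentative value inside a move."""

        def __init__(self, current_values):
            self.current = current_values            # values of the last solution
            self.delta_values = [0] * len(current_values)
            self.in_delta = set()                    # positions touched by the move

        def load(self, delta):                       # delta: {position: new_value}
            self.in_delta.clear()
            for pos, value in delta.items():
                self.delta_values[pos] = value
                self.in_delta.add(pos)

        def value(self, pos):
            if pos in self.in_delta:
                return self.delta_values[pos]
            return self.current[pos]

    # Checking the keep/forbid tabu lists then only walks the (short) lists and
    # calls value(pos), instead of searching the whole delta for every variable.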
diff --git a/ortools/constraint_solver/search_stats.proto b/ortools/constraint_solver/search_stats.proto
index 82cea906459..5031a1d06fc 100644
--- a/ortools/constraint_solver/search_stats.proto
+++ b/ortools/constraint_solver/search_stats.proto
@@ -89,10 +89,22 @@ message ConstraintSolverStatistics {
double duration_seconds = 5;
}
+// Statistics on sub-solvers.
+message SubSolverStatistics {
+ // Number of calls to Glop in LP scheduling.
+ int64 num_glop_calls_in_lp_scheduling = 1;
+ // Number of calls to CP-SAT in LP scheduling.
+ int64 num_cp_sat_calls_in_lp_scheduling = 2;
+ // Number of calls to min cost flow.
+ int64 num_min_cost_flow_calls = 3;
+}
+
// Search statistics.
message SearchStatistics {
// Local search statistics for each solver context.
repeated LocalSearchStatistics local_search_statistics = 1;
// Constraint solver statistics.
repeated ConstraintSolverStatistics constraint_solver_statistics = 2;
+ // Sub-solver statistics.
+ repeated SubSolverStatistics sub_solver_statistics = 3;
}
diff --git a/ortools/dotnet/Google.OrTools-full.csproj.in b/ortools/dotnet/Google.OrTools-full.csproj.in
index 727092df988..c20ce39b770 100644
--- a/ortools/dotnet/Google.OrTools-full.csproj.in
+++ b/ortools/dotnet/Google.OrTools-full.csproj.in
@@ -89,6 +89,10 @@
pdlp/%(Filename)%(Extension)
+
+ routing/%(Filename)%(Extension)
+
+
sat/%(Filename)%(Extension)
@@ -171,6 +175,11 @@
true
PreserveNewest
+
+ content/routing
+ true
+ PreserveNewest
+
content/sat
true
@@ -184,7 +193,7 @@
-
+
diff --git a/ortools/dotnet/Google.OrTools-local.csproj.in b/ortools/dotnet/Google.OrTools-local.csproj.in
index eb5a3eff5b7..a925e3e7249 100644
--- a/ortools/dotnet/Google.OrTools-local.csproj.in
+++ b/ortools/dotnet/Google.OrTools-local.csproj.in
@@ -89,6 +89,10 @@
pdlp/%(Filename)%(Extension)
+
+ routing/%(Filename)%(Extension)
+
+
sat/%(Filename)%(Extension)
@@ -159,6 +163,11 @@
true
PreserveNewest
+
+ content/routing
+ true
+ PreserveNewest
+
content/sat
true
@@ -172,7 +181,7 @@
-
+
diff --git a/ortools/flatzinc/challenge/Makefile b/ortools/flatzinc/challenge/Makefile
index 3dae17137dc..68a33b5fd2f 100644
--- a/ortools/flatzinc/challenge/Makefile
+++ b/ortools/flatzinc/challenge/Makefile
@@ -18,7 +18,7 @@ DOCKER_BUILD_CMD := docker build
endif
DOCKER_RUN_CMD := docker run --rm --init
-MZN_SUFFIX=2024v5
+MZN_SUFFIX=2025v2
DOCKER_NAME=cp-sat-minizinc-challenge
MZN_TAG=${DOCKER_NAME}:${MZN_SUFFIX}
MZN_LS_TAG=${DOCKER_NAME}-ls:${MZN_SUFFIX}
diff --git a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile
index 81db76a0ed2..d5d6d02634d 100644
--- a/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile
+++ b/ortools/flatzinc/challenge/minizinc-challenge-ls.Dockerfile
@@ -1,6 +1,6 @@
-FROM minizinc/mznc2024:latest AS env
+FROM minizinc/mznc2025:latest AS env
-ENV SRC_GIT_BRANCH v99bugfix
+ENV SRC_GIT_BRANCH=v99bugfix
ENV TZ=America/Los_Angeles
@@ -29,4 +29,6 @@ RUN ln -s /root/or-tools/bazel-bin/ortools/flatzinc/fz /entry_data/fzn-exec
RUN cp /root/or-tools/ortools/flatzinc/mznlib/*mzn /entry_data/mzn-lib
# Patch the run scripts
-RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-*
\ No newline at end of file
+RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -p 1 -G/g" /minizinc/mzn-exec-fd
+RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true,num_workers:3 -G/g" /minizinc/mzn-exec-free
+RUN sed -i -e "s/-G/--fzn-flags --params=use_ls_only:true -G/g" /minizinc/mzn-exec-par
diff --git a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile
index d111f1e5f88..0fdfc256e61 100644
--- a/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile
+++ b/ortools/flatzinc/challenge/minizinc-challenge.Dockerfile
@@ -1,6 +1,6 @@
-FROM minizinc/mznc2024:latest AS env
+FROM minizinc/mznc2025:latest AS env
-ENV SRC_GIT_BRANCH v99bugfix
+ENV SRC_GIT_BRANCH=v99bugfix
ENV TZ=America/Los_Angeles
diff --git a/ortools/graph/BUILD.bazel b/ortools/graph/BUILD.bazel
index d8d0a5c07d6..fac523588a4 100644
--- a/ortools/graph/BUILD.bazel
+++ b/ortools/graph/BUILD.bazel
@@ -52,6 +52,8 @@ cc_test(
":graph",
"//ortools/base:gmock_main",
"//ortools/base:intops",
+ "//ortools/base:strong_vector",
+ "@abseil-cpp//absl/algorithm:container",
"@abseil-cpp//absl/log:check",
"@abseil-cpp//absl/random",
"@abseil-cpp//absl/strings",
@@ -86,7 +88,9 @@ cc_library(
hdrs = ["bounded_dijkstra.h"],
deps = [
":graph",
+ "//ortools/base:intops",
"//ortools/base:iterator_adaptors",
+ "//ortools/base:strong_vector",
"//ortools/base:threadpool",
"//ortools/base:top_n",
"@abseil-cpp//absl/algorithm:container",
@@ -107,6 +111,7 @@ cc_test(
":test_util",
"//ortools/base:dump_vars",
"//ortools/base:gmock_main",
+ "//ortools/base:intops",
"//ortools/util:flat_matrix",
"@abseil-cpp//absl/log:check",
"@abseil-cpp//absl/random",
@@ -858,7 +863,7 @@ cc_test(
deps = [
":iterators",
"//ortools/base:gmock_main",
- "//ortools/base:strong_int",
+ "//ortools/base:intops",
],
)
diff --git a/ortools/graph/bounded_dijkstra.h b/ortools/graph/bounded_dijkstra.h
index e4522e5d900..98a7fc7e5f3 100644
--- a/ortools/graph/bounded_dijkstra.h
+++ b/ortools/graph/bounded_dijkstra.h
@@ -15,8 +15,10 @@
#define OR_TOOLS_GRAPH_BOUNDED_DIJKSTRA_H_
#include
+#include
#include
#include
+#include
#include
#include
@@ -25,6 +27,8 @@
#include "absl/log/check.h"
#include "absl/types/span.h"
#include "ortools/base/iterator_adaptors.h"
+#include "ortools/base/strong_int.h"
+#include "ortools/base/strong_vector.h"
#include "ortools/base/top_n.h"
#include "ortools/graph/graph.h"
@@ -54,22 +58,40 @@ namespace operations_research {
// is >= limit we will return {limit, {}}. As a consequence any arc length >=
// limit is the same as no arc. The code is also overflow-safe and will behave
// correctly if the limit is int64max or infinity.
-template
-std::pair> SimpleOneToOneShortestPath(
- int source, int destination, absl::Span tails,
- absl::Span heads, absl::Span lengths,
+template
+std::pair> SimpleOneToOneShortestPath(
+ NodeIndex source, NodeIndex destination, absl::Span tails,
+ absl::Span heads, absl::Span lengths,
DistanceType limit = std::numeric_limits::max());
-template
+namespace internal {
+
+// TODO(user): We should move `is_strong_int` to util/intops/strong_int.h.
+template
+struct is_strong_int : std::false_type {};
+
+template
+struct is_strong_int<::util_intops::StrongInt>
+ : std::true_type {};
+
+template
+using IndexedVector =
+ std::conditional_t::value,
+ ::util_intops::StrongVector,
+ std::vector>;
+
+template
class ElementGetter {
public:
- explicit ElementGetter(const std::vector& c) : c_(c) {}
- const T& operator()(int index) const { return c_[index]; }
+ explicit ElementGetter(const IndexedVector& c) : c_(c) {}
+ const T& operator()(ArcIndex index) const { return c_[index]; }
private:
- const std::vector& c_;
+ const IndexedVector& c_;
};
+} // namespace internal
+
// A wrapper that holds the memory needed to run many bounded shortest path
// computations on the given graph. The graph must implement the
// interface described in graph.h (without the need for reverse arcs).
@@ -92,12 +114,20 @@ class ElementGetter {
// negative source_offset, arc with a length greater than the distance_limit can
// still be considered!
template >
+ class ArcLengthFunctor = internal::ElementGetter<
+ DistanceType, typename GraphType::ArcIndex>>
class BoundedDijkstraWrapper {
public:
- typedef typename GraphType::NodeIndex node_type;
+ typedef typename GraphType::NodeIndex NodeIndex;
+ typedef typename GraphType::ArcIndex ArcIndex;
typedef DistanceType distance_type;
+ // A vector of T, indexed by NodeIndex/ArcIndex.
+ template
+ using ByNode = internal::IndexedVector;
+ template
+ using ByArc = internal::IndexedVector;
+
// IMPORTANT: Both arguments must outlive the class. The arc lengths cannot be
// negative and the vector must be of the correct size (both preconditions are
// CHECKed).
@@ -106,7 +136,7 @@ class BoundedDijkstraWrapper {
// RunBoundedDijkstra(). That's fine. Doing so will obviously invalidate the
// reader API of the last Dijkstra run, which could return junk, or crash.
BoundedDijkstraWrapper(const GraphType* graph,
- const std::vector* arc_lengths);
+ const ByArc* arc_lengths);
// Variant that takes a custom arc length functor and copies it locally.
BoundedDijkstraWrapper(const GraphType* graph,
@@ -116,8 +146,8 @@ class BoundedDijkstraWrapper {
// of the graph within the distance limit (exclusive). The first element of
// the returned vector will always be the source_node with a distance of zero.
// See RunBoundedDijkstraFromMultipleSources() for more information.
- const std::vector& RunBoundedDijkstra(int source_node,
- DistanceType distance_limit) {
+ const std::vector& RunBoundedDijkstra(
+ NodeIndex source_node, DistanceType distance_limit) {
return RunBoundedDijkstraFromMultipleSources({{source_node, 0}},
distance_limit);
}
@@ -127,7 +157,8 @@ class BoundedDijkstraWrapper {
//
// If this returns true, you can get the path distance with distances()[to]
// and the path with ArcPathTo(to) or NodePathTo(to).
- bool OneToOneShortestPath(int from, int to, DistanceType distance_limit);
+ bool OneToOneShortestPath(NodeIndex from, NodeIndex to,
+ DistanceType distance_limit);
// Returns the list of all the nodes which are under the given distance limit
// (exclusive) from at least one of the given source nodes (which also have
@@ -136,8 +167,8 @@ class BoundedDijkstraWrapper {
// By "distance", we mean the length of the shortest path from any source
// plus the source's distance offset, where the length of a path is the
// sum of the length of its arcs
- const std::vector& RunBoundedDijkstraFromMultipleSources(
- const std::vector>&
+ const std::vector& RunBoundedDijkstraFromMultipleSources(
+ const std::vector>&
sources_with_distance_offsets,
DistanceType distance_limit);
@@ -162,10 +193,11 @@ class BoundedDijkstraWrapper {
//
// Note that the distances() will take the source offsets into account,
// but not the destination offsets.
- std::vector RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
- const std::vector>&
+ std::vector
+ RunBoundedDijkstraFromMultipleSourcesToMultipleDestinations(
+ const std::vector>&
sources_with_distance_offsets,
- const std::vector>&
+ const std::vector>&
destinations_with_distance_offsets,
int num_destinations_to_reach, DistanceType distance_limit);
@@ -174,19 +206,19 @@ class BoundedDijkstraWrapper {
// happens at most once per node, when popping it from the Dijkstra queue,
// meaning that the node has been fully 'processed'). This callback may modify
// the distance limit dynamically, thus affecting the stopping criterion.
- const std::vector& RunBoundedDijkstraWithSettledNodeCallback(
- const std::vector>&
+ const std::vector& RunBoundedDijkstraWithSettledNodeCallback(
+ const std::vector>&
sources_with_distance_offsets,
- std::function
settled_node_callback,
DistanceType distance_limit);
// Returns true if `node` was reached by the last Run*() call.
- bool IsReachable(int node) const { return is_reached_[node]; }
+ bool IsReachable(NodeIndex node) const { return is_reached_[node]; }
// Returns all the reached nodes from the previous Run*() call.
- const std::vector& reached_nodes() const { return reached_nodes_; }
+ const ByNode& reached_nodes() const { return reached_nodes_; }
// The following vectors are all indexed by graph node indices.
//
@@ -194,7 +226,7 @@ class BoundedDijkstraWrapper {
// reached nodes are updated, the others will contain junk.
// The distance of the nodes from their source.
- const std::vector& distances() const { return distances_; }
+ const ByNode& distances() const { return distances_; }
// The parent of the nodes in the shortest path from their source.
// When a node doesn't have any parent (it has to be a source), its parent
@@ -203,27 +235,29 @@ class BoundedDijkstraWrapper {
// arcs have a length of zero.
// Note also that some sources may have parents, because of the initial
// distances.
- const std::vector& parents() const { return parents_; }
+ const ByNode& parents() const { return parents_; }
// The arc reaching a given node in the path from their source.
// arc_from_source()[x] is undefined (i.e. junk) when parents()[x] == x.
- const std::vector& arc_from_source() const { return arc_from_source_; }
+ const ByNode& arc_from_source() const { return arc_from_source_; }
// Returns the list of all the arcs in the shortest path from the node's
// source to the node.
- std::vector ArcPathTo(int node) const;
+ std::vector ArcPathTo(NodeIndex node) const;
ABSL_DEPRECATED("Use ArcPathTo() instead.")
- std::vector ArcPathToNode(int node) const { return ArcPathTo(node); }
+ std::vector ArcPathToNode(NodeIndex node) const {
+ return ArcPathTo(node);
+ }
// Returns the list of all the nodes in the shortest path from the node's
// source to the node. This always start by the node's source, and end by
// the given node. In the case that source == node, returns {node}.
- std::vector NodePathTo(int node) const;
+ std::vector NodePathTo(NodeIndex node) const;
// Returns the node's source. This is especially useful when running
// Dijkstras from multiple sources.
- int SourceOfShortestPathToNode(int node) const;
+ NodeIndex SourceOfShortestPathToNode(NodeIndex node) const;
// Original Source/Destination index extraction, after a call to the
// multi-source and/or multi-destination variants:
@@ -239,16 +273,16 @@ class BoundedDijkstraWrapper {
// rely on the value.
//
// These methods are invalidated by the next RunBoundedDijkstra*() call.
- int GetSourceIndex(int node) const;
- int GetDestinationIndex(int node) const;
+ int GetSourceIndex(NodeIndex node) const;
+ int GetDestinationIndex(NodeIndex node) const;
// Trivial accessors to the underlying graph and arc lengths.
const GraphType& graph() const { return *graph_; }
- const std::vector& arc_lengths() const {
+ const ByArc& arc_lengths() const {
CHECK(arc_lengths_);
return *arc_lengths_;
}
- DistanceType GetArcLength(int arc) const {
+ DistanceType GetArcLength(ArcIndex arc) const {
const DistanceType length = arc_length_functor_(arc);
DCHECK_GE(length, 0);
return length;
@@ -262,18 +296,18 @@ class BoundedDijkstraWrapper {
// The Graph and length of each arc.
const GraphType* const graph_;
ArcLengthFunctor arc_length_functor_;
- const std::vector* const arc_lengths_;
+ const ByArc* const arc_lengths_;
// Data about the last Dijkstra run.
- std::vector distances_;
- std::vector parents_;
- std::vector arc_from_source_;
- std::vector is_reached_;
- std::vector reached_nodes_;
+ ByNode distances_;
+ ByNode parents_;
+ ByNode arc_from_source_;
+ ByNode is_reached_;
+ std::vector reached_nodes_;
// Priority queue of nodes, ordered by their distance to the source.
struct NodeDistance {
- node_type node; // The target node.
+ NodeIndex node; // The target node.
DistanceType distance; // Its distance from the source.
bool operator<(const NodeDistance& other) const {
@@ -287,7 +321,7 @@ class BoundedDijkstraWrapper {
// or ieee754 floating-point, when the machine is little endian, and
// when the total size of NodeDistance equals 16 bytes).
// And here are the speeds of the BM_GridGraph benchmark (in which
- // DistanceType=int64_t and node_type=int32_t), done with benchy
+ // DistanceType=int64_t and NodeIndex=int32_t), done with benchy
// --runs=20: 0) BM_GridGraph 9.22ms ± 5% BM_GridGraph 3.19ms
// ± 6% 1) BM_GridGraph 8.89ms ± 4% BM_GridGraph 3.07ms ±
// 3% 2) BM_GridGraph 8.61ms ± 3% BM_GridGraph 3.13ms ± 6%
@@ -303,8 +337,8 @@ class BoundedDijkstraWrapper {
// The vectors are only allocated after they are first used.
// Between calls, is_destination_ is all false, and the rest is junk.
std::vector is_destination_;
- std::vector node_to_source_index_;
- std::vector node_to_destination_index_;
+ ByNode node_to_source_index_;
+ ByNode node_to_destination_index_;
};
// -----------------------------------------------------------------------------
@@ -314,12 +348,12 @@ class BoundedDijkstraWrapper {
template
BoundedDijkstraWrapper