@@ -1,10 +1,13 @@
+use std::collections::BTreeMap;
+
 use crate::{
     action::Action,
     compare::{check_preconditions, compare_values},
+    datum::Datum,
     effect::Effect,
     goal::Goal,
-    mutator::{print_mutators, Mutator},
-    state::LocalState,
+    localstate::LocalState,
+    mutator::{apply_mutator, print_mutators, Mutator},
 };
 
 use bevy_reflect::Reflect;
@@ -44,35 +47,22 @@ fn successors<'a>(
 ) -> impl Iterator<Item = (Node, usize)> + 'a {
     let state = node.state();
     actions.iter().filter_map(move |action| {
-        if check_preconditions(state, action) {
-            let mut new_state = state.clone();
-            if action.effects.len() == 0 {
-                return None;
-            }
-            for mutator in &action.effects[0].mutators {
-                match mutator {
-                    Mutator::Set(key, value) => {
-                        new_state.data.insert(key.to_string(), *value);
-                    }
-                    Mutator::Increment(key, value) => {
-                        if let Some(current_value) = new_state.data.get_mut(key) {
-                            *current_value += *value;
-                        }
-                    }
-                    Mutator::Decrement(key, value) => {
-                        if let Some(current_value) = new_state.data.get_mut(key) {
-                            *current_value -= *value;
-                        }
-                    }
-                }
+        if check_preconditions(state, action) && !action.effects.is_empty() {
+            let new_state = state.clone();
+            let first_effect = &action.effects[0];
+
+            let mut new_data = new_state.data.clone();
+            for mutator in &first_effect.mutators {
+                apply_mutator(&mut new_data, mutator);
             }
+
             let new_effect = Effect {
-                action: action.effects[0].action.clone(),
-                mutators: action.effects[0].mutators.clone(),
-                cost: action.effects[0].cost,
-                state: new_state,
+                action: first_effect.action.clone(),
+                mutators: first_effect.mutators.clone(),
+                cost: first_effect.cost,
+                state: LocalState { data: new_data },
             };
-            Some((Node::Effect(new_effect), action.effects[0].cost))
+            Some((Node::Effect(new_effect), first_effect.cost))
         } else {
             None
         }
@@ -89,18 +79,6 @@ fn is_goal(node: &Node, goal: &Goal) -> bool {
     })
 }
 
-/// We implement two different strategies for finding a solution
-#[derive(Default)]
-pub enum PlanningStrategy {
-    #[default]
-    /// StartToGoal begins with our current state, and finds the most optimal path to the goal, based on the costs
-    /// Might take longer than GoalToStart, but finds the path with the lowest cost
-    StartToGoal,
-    /// GoalToStart begins with the goal state, and works backwards from there, in order to find a path as quickly as possible
-    /// Might lead to less-than-optimal paths, but should find a valid path quicker
-    GoalToStart,
-}
-
 pub fn make_plan_with_strategy(
     strategy: PlanningStrategy,
     start: &LocalState,
@@ -123,6 +101,18 @@ pub fn make_plan_with_strategy(
     }
 }
 
+/// We implement two different strategies for finding a solution
+#[derive(Default)]
+pub enum PlanningStrategy {
+    #[default]
+    /// StartToGoal begins with our current state, and finds the most optimal path to the goal, based on the costs
+    /// Might take longer than GoalToStart, but finds the path with the lowest cost
+    StartToGoal,
+    /// GoalToStart begins with the goal state, and works backwards from there, in order to find a path as quickly as possible
+    /// Might lead to less-than-optimal paths, but should find a valid path quicker
+    GoalToStart,
+}
+
 pub fn make_plan(
     start: &LocalState,
     actions: &[Action],
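
The core of this change is that successors() no longer matches on Mutator variants inline; it clones the state's data map and delegates each mutation to apply_mutator from the mutator module. The helper's body is not shown in this diff. Below is a minimal sketch of what it presumably does, reconstructed from the match arms removed above, with i64 used as a hypothetical stand-in for the crate's Datum value type:

use std::collections::BTreeMap;

// Hypothetical stand-in for the crate's Datum type; the real map is
// presumably BTreeMap<String, Datum>, as the new imports suggest.
type Value = i64;

pub enum Mutator {
    Set(String, Value),
    Increment(String, Value),
    Decrement(String, Value),
}

// Sketch of the consolidated helper: it mirrors the match that the old
// successors() body performed directly on new_state.data.
pub fn apply_mutator(data: &mut BTreeMap<String, Value>, mutator: &Mutator) {
    match mutator {
        Mutator::Set(key, value) => {
            data.insert(key.to_string(), *value);
        }
        Mutator::Increment(key, value) => {
            if let Some(current) = data.get_mut(key) {
                *current += *value;
            }
        }
        Mutator::Decrement(key, value) => {
            if let Some(current) = data.get_mut(key) {
                *current -= *value;
            }
        }
    }
}

Centralising the mutation logic this way keeps successors() focused on the search itself and lets other call sites reuse the same Set/Increment/Decrement semantics.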
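The PlanningStrategy enum itself is unchanged by this commit; it is only moved from above make_plan_with_strategy to just above make_plan. As a small illustration of the trade-off its doc comments describe, a hypothetical helper (choose_strategy is not part of the crate) might pick between the two variants like this:

// Hypothetical helper, not part of the crate: picks a strategy based on
// whether the caller prefers a fast answer over an optimal one.
fn choose_strategy(time_critical: bool) -> PlanningStrategy {
    if time_critical {
        // Works backwards from the goal; should find a valid plan sooner,
        // but it may cost more.
        PlanningStrategy::GoalToStart
    } else {
        // Cost-optimal forward search from the current state (the default).
        PlanningStrategy::StartToGoal
    }
}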