Autowalker is now using the GOAP AI system and works way better. There's still quite a lot of jank in the code, but that'll get removed over time. Next up is detecting when it's near an item/enemy and reacting properly.

Zed A. Shaw 2025-03-12 12:15:21 -04:00
parent ff81c78d13
commit d15c9b12fd
9 changed files with 84 additions and 47 deletions
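For readers skimming the diff below: the planner treats world state as a bitset of facts, and actions as precondition/effect masks over that bitset. Here is a minimal sketch of that model, assuming a std::bitset-backed State and plain member names; the real repo defines State and Action itself (with a $-prefixed member convention), so names and sizes here are illustrative stand-ins only.

// Minimal sketch of the state/action model this diff assumes. Names,
// STATE_BITS, and the plain member naming are stand-ins, not the
// project's actual definitions.
#include <bitset>
#include <cstddef>

constexpr std::size_t STATE_BITS = 32;   // assumed capacity, not from the diff
using State = std::bitset<STATE_BITS>;   // one bit per world fact

struct Action {
  int cost = 1;              // folded into the heuristic by this commit
  State positive_preconds;   // facts that must be true to run
  State negative_preconds;   // facts that must be false to run
  State positive_effects;    // facts set true when the action applies
  State negative_effects;    // facts set false when the action applies

  // Runnable iff every required fact is present and no forbidden fact is set.
  bool can_effect(const State& state) const {
    return ((state & positive_preconds) == positive_preconds) &&
           (state & negative_preconds).none();
  }

  // Apply effects: turn the positives on, mask the negatives off.
  State apply_effect(const State& state) const {
    return (state | positive_effects) & ~negative_effects;
  }

  // Stop tracking a fact entirely, as the new Action::ignore below does.
  void ignore(int name) {
    positive_preconds[name] = false;
    negative_preconds[name] = false;
  }
};

// The commit's key change: distance is the Hamming distance between the
// two states plus the cost of the action being considered.
int distance_to_goal(State from, State to, const Action& action) {
  return static_cast<int>((from ^ to).count()) + action.cost;
}

With this shape, can_effect is two mask comparisons and apply_effect is an OR followed by a masked AND, which is why the planner can afford to run A* directly over raw states.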


@@ -31,6 +31,11 @@ namespace ai {
     }
   }
 
+  void Action::ignore(int name) {
+    $positive_preconds[name] = false;
+    $negative_preconds[name] = false;
+  }
+
   bool Action::can_effect(State& state) {
     return ((state & $positive_preconds) == $positive_preconds) &&
@@ -41,16 +46,15 @@ namespace ai {
     return (state | $positive_effects) & ~$negative_effects;
   }
 
-  int distance_to_goal(State& from, State& to) {
+  int distance_to_goal(State from, State to, Action& action) {
     auto result = from ^ to;
-    return result.count();
+    return result.count() + action.cost;
   }
 
   Script reconstruct_path(std::unordered_map<Action, Action>& came_from, Action& current) {
     Script total_path{current};
-    int count = 0;
 
-    while(came_from.contains(current) && count++ < 10) {
+    while(came_from.contains(current)) {
       current = came_from.at(current);
       if(current != FINAL_ACTION) {
         total_path.push_front(current);
@@ -60,12 +64,12 @@ namespace ai {
     return total_path;
   }
 
-  inline int h(State& start, State& goal) {
-    return distance_to_goal(start, goal);
+  inline int h(State start, State goal, Action& action) {
+    return distance_to_goal(start, goal, action);
   }
 
-  inline int d(State& start, State& goal) {
-    return distance_to_goal(start, goal);
+  inline int d(State start, State goal, Action& action) {
+    return distance_to_goal(start, goal, action);
   }
 
   ActionState find_lowest(std::unordered_map<ActionState, int>& open_set) {
@@ -90,7 +94,7 @@ namespace ai {
     ActionState current{FINAL_ACTION, start};
     g_score[start] = 0;
 
-    open_set[current] = g_score[start] + h(start, goal);
+    open_set[current] = g_score[start] + h(start, goal, current.action);
 
     while(!open_set.empty()) {
       current = find_lowest(open_set);
@@ -109,7 +113,7 @@ namespace ai {
       }
 
       auto neighbor = neighbor_action.apply_effect(current.state);
-      int d_score = d(current.state, neighbor);
+      int d_score = d(current.state, neighbor, current.action);
       int tentative_g_score = g_score[current.state] + d_score;
       int neighbor_g_score = g_score.contains(neighbor) ? g_score[neighbor] : SCORE_MAX;
 
      if(tentative_g_score < neighbor_g_score) {
@@ -118,7 +122,7 @@ namespace ai {
         g_score[neighbor] = tentative_g_score;
         // open_set gets the fScore
         ActionState neighbor_as{neighbor_action, neighbor};
-        open_set[neighbor_as] = tentative_g_score + h(neighbor, goal);
+        open_set[neighbor_as] = tentative_g_score + h(neighbor, goal, neighbor_as.action);
       }
     }
   }
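To see what threading action.cost through h and d buys, here is a toy plan built on the sketch above. The actions and fact names are hypothetical, not from the repo: two actions reach the same goal fact, and the cost term is what makes A* expand the cheap one first.

enum Facts { ENEMY_NEAR = 0, ENEMY_DEAD = 1 };

int main() {
  State start, goal;
  start[ENEMY_NEAR] = true;   // we can see the enemy
  goal[ENEMY_DEAD] = true;    // we want it dead

  Action melee;               // hypothetical cheap action
  melee.cost = 1;
  melee.positive_preconds[ENEMY_NEAR] = true;
  melee.positive_effects[ENEMY_DEAD] = true;

  Action fireball;            // hypothetical expensive action, same effect
  fireball.cost = 5;
  fireball.positive_preconds[ENEMY_NEAR] = true;
  fireball.positive_effects[ENEMY_DEAD] = true;

  // Before this commit both actions scored the same Hamming distance (2).
  // With the cost term, melee scores 2 + 1 = 3 and fireball 2 + 5 = 7, so
  // the open set orders melee first and the plan prefers it.
  int via_melee = distance_to_goal(start, goal, melee);
  int via_fireball = distance_to_goal(start, goal, fireball);
  return via_melee < via_fireball ? 0 : 1;
}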