diff --git a/sweepai/api.py b/sweepai/api.py
index c63b972aea..3cf8c9a580 100644
--- a/sweepai/api.py
+++ b/sweepai/api.py
@@ -809,9 +809,6 @@ def handle_event(request_dict, event):
             except Exception as e:
                 logger.exception(f"Failed to add config to top repos: {e}")
         case "pull_request", "edited":
-            # apparently body is sometimes None
-            if not request_dict.get('body', ''):
-                request_dict['body'] = ''
             request = PREdited(**request_dict)
 
             if (
diff --git a/sweepai/core/vector_db.py b/sweepai/core/vector_db.py
index 189937ad13..e45ef07027 100644
--- a/sweepai/core/vector_db.py
+++ b/sweepai/core/vector_db.py
@@ -190,7 +190,7 @@ def openai_call_embedding(batch: list[str], input_type: str="document"):
         raise e
     except openai.BadRequestError as e:
         # In the future we can better handle this by averaging the embeddings of the split batch
-        if "This model's maximum context length" in str(e):
+        if "maximum context length" in str(e):
             logger.warning(f"Token count exceeded for batch: {max([tiktoken_client.count(text) for text in batch])} truncating down to 8192 tokens.")
             batch = [tiktoken_client.truncate_string(text) for text in batch]
             return openai_call_embedding(batch, input_type)
diff --git a/sweepai/utils/github_utils.py b/sweepai/utils/github_utils.py
index d61f464c30..1b34535a09 100644
--- a/sweepai/utils/github_utils.py
+++ b/sweepai/utils/github_utils.py
@@ -405,6 +405,9 @@ def __post_init__(self):
         self.commit_hash = self.repo.get_commits()[0].sha
         self.git_repo = self.clone()
         self.branch = self.branch or SweepConfig.get_branch(self.repo)
+        # branch may have been deleted or not exist
+        if self.branch not in self.git_repo.heads:
+            raise Exception(f"Branch '{self.branch}' does not exist.")
         self.git_repo.git.checkout(self.branch)
 
     def __del__(self):
diff --git a/sweepai/web/events.py b/sweepai/web/events.py
index 7aebdccb91..2157de96c1 100644
--- a/sweepai/web/events.py
+++ b/sweepai/web/events.py
@@ -32,7 +32,7 @@ class User(BaseModel):
 
     html_url: str
    title: str
-    body: str
+    body: str | None
     number: int
     user: User
 
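The `api.py` and `events.py` hunks are two halves of one fix: GitHub sends `"body": null` for pull requests with an empty description, so typing the field as `str | None` lets Pydantic accept the payload directly instead of patching `request_dict` before validation. A minimal sketch of the behavior, using a stand-in model rather than Sweep's real `PREdited` schema:

```python
# Minimal sketch, assuming Pydantic v2 style models; this PullRequest is a
# stand-in for the nested model that PREdited validates, not Sweep's real class.
from pydantic import BaseModel


class PullRequest(BaseModel):
    title: str
    body: str | None  # GitHub sends null when the PR description is empty


# With `body: str` this payload raised a ValidationError, which is why the
# webhook handler used to rewrite request_dict["body"] to "" first.
pr = PullRequest(title="Fix typo", body=None)
assert pr.body is None
```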
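Loosening the match in `openai_call_embedding` from the full sentence to the substring `"maximum context length"` keeps the truncate-and-retry path alive even if OpenAI rewords the error prefix. A hedged sketch of that retry shape, with a tiktoken-based truncator standing in for Sweep's `tiktoken_client` helpers (the model name and encoding are illustrative assumptions):

```python
# Sketch only: "text-embedding-3-small" and cl100k_base are assumptions, and
# this standalone truncate() stands in for tiktoken_client.truncate_string.
import openai
import tiktoken

MAX_TOKENS = 8192
_enc = tiktoken.get_encoding("cl100k_base")


def truncate(text: str, limit: int = MAX_TOKENS) -> str:
    # Hard-truncate by token count so the retried batch fits the context window.
    return _enc.decode(_enc.encode(text)[:limit])


def embed_with_retry(client: openai.OpenAI, batch: list[str]) -> list[list[float]]:
    try:
        response = client.embeddings.create(model="text-embedding-3-small", input=batch)
        return [item.embedding for item in response.data]
    except openai.BadRequestError as e:
        # Match the stable core of the message rather than the exact wording
        # ("This model's maximum context length is ..."), which can change.
        if "maximum context length" in str(e):
            return embed_with_retry(client, [truncate(t) for t in batch])
        raise
```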
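The `github_utils.py` guard turns a cryptic `GitCommandError` from `git checkout` into an explicit failure when the requested branch no longer exists. It relies on GitPython's `repo.heads`, whose `in` operator also matches branch names. A small sketch under an assumed local clone path:

```python
# Sketch with an illustrative clone path; the branch name is hypothetical.
import git

repo = git.Repo("/tmp/example-clone")
branch = "feature/deleted-upstream"

# repo.heads is an IterableList whose membership test falls back to name
# lookup, so this fails fast with a readable message instead of git's stderr.
if branch not in repo.heads:
    raise Exception(f"Branch '{branch}' does not exist.")
repo.git.checkout(branch)
```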