Commit
feat: add new scrapper and output to file
SaboniAmine committed Oct 4, 2024
1 parent 310858a commit f0707af
Showing 8 changed files with 1,520 additions and 71 deletions.
3 changes: 3 additions & 0 deletions README.md
@@ -2,6 +2,9 @@

# Install the repo

First, install the scraper dependencies by following the tutorial in the newspaper repository: https://github.com/codelucas/newspaper?tab=readme-ov-file#get-it-now
Install only the OS-level dependencies; the Python package itself is installed in a virtual environment below.

Then install the uv package:

```bash
# (remaining install instructions collapsed in this diff view)
```
23 changes: 23 additions & 0 deletions climateguard/models.py
@@ -0,0 +1,23 @@

from pydantic import BaseModel


class Claim(BaseModel):
claim: str
context: str
analysis: str
disinformation_score: str
disinformation_category: str

class Claims(BaseModel):
claims: list[Claim]


class Article(BaseModel):
title: str
content: str
url: str
date: str
topic: str
source: str
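
These models are what the rest of the pipeline parses LLM output into (see `Claims.model_validate_json` in `gradio_app.py`). As a rough illustration only, not part of this commit and with an invented JSON payload:

```python
# Minimal sketch: round-tripping a model response through the pydantic models above.
# The JSON payload is invented for illustration.
from climateguard.models import Claims

raw_json = """
{
  "claims": [
    {
      "claim": "Sea levels are not rising",
      "context": "Statement made during a debate on coastal erosion.",
      "analysis": "Contradicts the scientific consensus on sea-level rise.",
      "disinformation_score": "high",
      "disinformation_category": "consensus"
    }
  ]
}
"""

claims = Claims.model_validate_json(raw_json)
for c in claims.claims:
    print(c.claim, "->", c.disinformation_score)
```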

63 changes: 0 additions & 63 deletions climateguard/news_scrapper

This file was deleted.

116 changes: 116 additions & 0 deletions climateguard/news_scrapper.py
@@ -0,0 +1,116 @@
import requests
from bs4 import BeautifulSoup
import json
from models import Article
from newspaper import Article as NewspaperArticle

class NewsScraper:
def __init__(self):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
}

def scrape_article(self, url):
# Try NewspaperArticle first
newspaper_article = NewspaperArticle(url)
newspaper_article.download()
newspaper_article.parse()

if newspaper_article.text:
return Article(
title=newspaper_article.title,
content=newspaper_article.text,
url=url,
date=str(newspaper_article.publish_date) if newspaper_article.publish_date else '',
topic='', # NewspaperArticle doesn't provide a topic
source=url
)

# If NewspaperArticle fails to extract text, use custom scrapers
response = requests.get(url, headers=self.headers)
soup = BeautifulSoup(response.content, 'html.parser')

if 'lsm.lv' in url:
return self._scrape_lsm(soup, url)
elif 'delfi.lv' in url:
return self._scrape_delfi(soup, url)
elif 'nra.lv' in url:
return self._scrape_nra(soup, url)
else:
            raise ValueError(f"Unsupported website: {url}")

def _scrape_lsm(self, soup, url):
content = ' '.join([p.text for p in soup.find_all('p')])
title = soup.find('h1').text.strip() if soup.find('h1') else ''
topic = soup.find('meta', {'property': 'article:section'})['content'] if soup.find('meta', {'property': 'article:section'}) else ''
date = soup.find('meta', {'property': 'article:published_time'})['content'] if soup.find('meta', {'property': 'article:published_time'}) else ''

return Article(
title=title,
content=content,
url=url,
date=date,
topic=topic,
source=url
)

def _scrape_delfi(self, soup, url):
content = ' '.join([p.text for p in soup.find_all('p', class_='C-article-body__paragraph')])
title = soup.find('h1', class_='C-article-headline').text.strip() if soup.find('h1', class_='C-article-headline') else ''
topic = soup.find('a', class_='C-article-info__category').text.strip() if soup.find('a', class_='C-article-info__category') else ''
date = soup.find('time', class_='C-article-info__time')['datetime'] if soup.find('time', class_='C-article-info__time') else ''

return Article(
title=title,
content=content,
url=url,
date=date,
topic=topic,
source=url
)

def _scrape_nra(self, soup, url):
content = ' '.join([p.text for p in soup.find_all('p', class_='article-text')])
title = soup.find('h1', class_='article-title').text.strip() if soup.find('h1', class_='article-title') else ''
topic = soup.find('span', class_='article-category').text.strip() if soup.find('span', class_='article-category') else ''
date = soup.find('time', class_='article-date')['datetime'] if soup.find('time', class_='article-date') else ''

return Article(
title=title,
content=content,
url=url,
date=date,
topic=topic,
source=url
)

# Usage example:
if __name__ == "__main__":
scraper = NewsScraper()
urls = [
"https://www.lsm.lv/raksts/dzive--stils/vide-un-dzivnieki/03.10.2024-zinojums-lidz-gadsimta-beigam-latvija-prognozeta-krasta-linijas-atkapsanas-par-47-72-metriem.a571093/",
"https://www.delfi.lv/bizness/56234200/eiropas-zinas/120042670/zinam-problemu-un-neizmantojam-risinajumus-ko-latvijas-iedzivotaji-doma-par-klimata-parmainam",
"https://www.delfi.lv/bizness/56234200/eiropas-zinas/120042670/kutri-izmantojam-dzerama-udens-kranus-kapec-iedzivotajiem-trukst-pamudinajuma-dzivot-zalak",
"https://nra.lv/pasaule/465572-sliktas-zinas-baltvina-cienitajiem.htm",
"https://www.lsm.lv/raksts/dzive--stils/vide-un-dzivnieki/20.09.2024-par-zalaku-rigu-spriedis-piecas-sestdienas-ko-sagaida-no-pirmas-iedzivotaju-klimata-asamblejas.a569637/"
]

articles = []

for url in urls:
article = scraper.scrape_article(url)
articles.append(article)
print(f"Scraped: {article.title}")
print(f"Content length: {len(article.content)}")
print(f"Date: {article.date}")
print("---")

# Save to JSON
output_file = 'scraped_articles.json'
with open(output_file, 'w', encoding='utf-8') as f:
        json.dump([article.model_dump() for article in articles], f, ensure_ascii=False, indent=4)

print(f"\nArticles saved to {output_file}")
165 changes: 165 additions & 0 deletions gradio_app.py
@@ -0,0 +1,165 @@
import os
from dotenv import load_dotenv
import gradio as gr
import pandas as pd
from anthropic import Anthropic

from climateguard.models import Claims

# Load environment variables from .env file
load_dotenv()

# Get the API key from environment variables
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")

def read_parquet(uploaded_file):
return pd.read_parquet(uploaded_file)

# Initialize the Anthropic client
client = Anthropic(api_key=ANTHROPIC_API_KEY)


MAPPING = {
"very low": 0,
"low": 1,
"medium": 2,
"high": 5,
}

def detect_claims(transcription):
prompt = f"""
Tu es expert en désinformation sur les sujets environnementaux, expert en science climatique et sachant tout sur le GIEC. Je vais te donner un extrait d'une retranscription de 2 minutes d'un flux TV ou Radio.
A partir de cet extrait liste moi tous les faits/opinions environnementaux (claim) uniques qu'il faudrait factchecker. Et pour chaque claim, donne une première analyse si c'est de la désinformation ou non, un score si c'est de la désinformation, ainsi qu'une catégorisation de cette allégation.
Ne sélectionne que les claims sur les thématiques environnementales (changement climatique, transition écologique, énergie, biodiversité, pollution, pesticides, ressources (eau, minéraux, ..) et pas sur les thématiques sociales et/ou économiques
    Renvoie le résultat en json, sans autre phrase d'introduction ou de conclusion, avec à chaque fois les 5 champs suivants : "claim", "context", "analysis", "disinformation_score", "disinformation_category"
- "claim" - l'allégation à potentiellement vérifier
- "context" - reformulation du contexte dans laquelle cette allégation a été prononcée (maximum 1 paragraphe)
- "analysis" - première analyse du point de vue de l'expert sur le potentiel de désinformation de cette allégation en fonction du contexte
Pour les scores "disinformation_score"
- "very low" = pas de problème, l'allégation n'est pas trompeuse ou à risque. pas besoin d'investiguer plus loin
- "low" = allégation qui nécessiterait une vérification et une interrogation, mais sur un sujet peu important et significatif dans le contexte des enjeux écologiques (exemple : les tondeuses à gazon,
- "medium" = allégation problématique sur un sujet écologique important (scientifique, impacts, élections, politique, transport, agriculture, énergie, alimentation, démocratie ...) , qui nécessiterait vraiment d'être vérifiée, déconstruite, débunkée et interrogée. En particulier pour les opinions fallacieuses
- "high" = allégation grave, en particulier si elle nie le consensus scientifique
Pour les catégories de désinformation "disinformation_category":
- "consensus" = négation du consensus scientifique
- "facts" = fait à vérifier, à préciser ou contextualiser
- "narrative" = narratif fallacieux ou opinion qui sème le doute (par exemple : "les écolos veulent nous enlever nos libertés")
- "other"
<transcription>
{transcription}
</transcription>
"""

    completion = client.messages.create(
        model="claude-2.1",
        max_tokens=2000,
        temperature=0.2,
        messages=[{"role": "user", "content": prompt}],
    )
    response_content = completion.content[0].text

    # Parse the JSON response into the pydantic models
    claims_data = Claims.model_validate_json(response_content)
    result = pd.DataFrame([claim.model_dump() for claim in claims_data.claims])

    # The Messages API reports input and output tokens separately
    return result, completion.usage.input_tokens + completion.usage.output_tokens


def analyze_disinformation(claim):

prompt = f"""
Tu es expert du changement climatique, scientifique du GIEC.
Voici une allégation qui pourrait s'apparenter à de la désinformation sur les enjeux écologiques prononcées à la TV.
{claim}
Peux-tu en faire une analyse complète de pourquoi c'est de la désinformation, puis en débunkant de façon sourcée.
Renvoie directement ton analyse sans message d'introduction ou de conclusion.
"""

    # The client is an Anthropic client, so use the Messages API rather than the OpenAI one
    completion = client.messages.create(
        model="claude-2.1",
        max_tokens=2000,
        messages=[
            {
                "role": "user",
                "content": prompt,
            }
        ],
    )
    n_tokens = completion.usage.input_tokens + completion.usage.output_tokens
    message = completion.content[0].text

    return message, n_tokens



def detect_claims_interface(text_input, source_type):
claims, _ = detect_claims(text_input)
claims["score"] = claims["disinformation_score"].map(lambda x: MAPPING.get(x))
average_score = round(claims["score"].mean(), 1)

output = f"Disinformation risk average score: {average_score}/5\n\n"

for _, row in claims.iterrows():
score = row["score"]
risk_level = "Low" if score <= 1 else "Medium" if score == 2 else "High"

output += f"Claim: {row['claim']}\n"
output += f"Context: {row['context']}\n"
output += f"Disinformation risk score: {row['disinformation_score']} ({row['score']}/5)\n"
output += f"Analysis: {row['analysis']}\n\n"

return output, claims

def analyze_disinformation_interface(claim, sources):
analysis, _ = analyze_disinformation(claim)
return analysis

def generate_alert(alert_type):
# Placeholder function for alert generation
return f"Generated {alert_type} alert"

def gradio_app():
with gr.Blocks() as app:
gr.Markdown("# CLIMATE DISINFORMATION DETECTION")

with gr.Tab("Climate speech detection"):
file_upload = gr.File(label="Choose a file to analyze", file_types=[".parquet"])
output_dataframe = gr.Dataframe()

file_upload.upload(read_parquet, file_upload, output_dataframe)

with gr.Tab("Claims detection"):
text_input = gr.Textbox(label="Enter text for analysis", lines=5)
source_type = gr.Dropdown(["TV / Radio", "Social network post", "Video transcript"], label="Select source type")
detect_button = gr.Button("Analyze")
claims_output = gr.Textbox(label="Analysis Results", lines=10)

detect_button.click(detect_claims_interface, inputs=[text_input, source_type], outputs=[claims_output])

with gr.Tab("Climate disinformation analysis"):
claim_dropdown = gr.Dropdown(label="Select claim")
sources = gr.CheckboxGroup(["IPCC", "IPBES", "ADEME", "ClimateFeedback"], label="Select sources", value=["IPCC", "IPBES", "ADEME", "ClimateFeedback"])
analysis_output = gr.Textbox(label="Analysis", lines=10)

claim_dropdown.change(analyze_disinformation_interface, inputs=[claim_dropdown, sources], outputs=analysis_output)

with gr.Tab("Alert generation"):
alert_type = gr.Radio(["ARCOM", "DSA"], label="Select alert type")
generate_button = gr.Button("Generate Alert")
alert_output = gr.Textbox(label="Alert Output", lines=5)

generate_button.click(generate_alert, inputs=alert_type, outputs=alert_output)

return app

if __name__ == "__main__":
app = gradio_app()
app.launch()
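
Note that `claim_dropdown` is created without choices, so the analysis tab has nothing to select. One possible wiring, shown only as a hypothetical sketch (the `detect_and_fill` helper is not in this commit), is to have the detection callback also return an update for the dropdown:

```python
# Hypothetical sketch: populate the claim dropdown from the claims detected in the
# "Claims detection" tab. Would live inside gradio_app(), next to the existing wiring.
def detect_and_fill(text_input, source_type):
    output, claims = detect_claims_interface(text_input, source_type)
    # gr.update lets a callback change a component's properties, here the dropdown choices
    return output, gr.update(choices=claims["claim"].tolist())

# detect_button.click(
#     detect_and_fill,
#     inputs=[text_input, source_type],
#     outputs=[claims_output, claim_dropdown],
# )
```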