enh: proxies (#157)

* enh: proxies

* enh: proxies
Cullen Watson
2024-05-25 14:04:09 -05:00
committed by GitHub
parent cd29f79796
commit 5cb7ffe5fd
12 changed files with 149 additions and 354 deletions

View File

@@ -30,7 +30,7 @@ def scrape_jobs(
     results_wanted: int = 15,
     country_indeed: str = "usa",
     hyperlinks: bool = False,
-    proxy: str | None = None,
+    proxies: list[str] | str | None = None,
     description_format: str = "markdown",
     linkedin_fetch_description: bool | None = False,
     linkedin_company_ids: list[int] | None = None,
@@ -96,7 +96,7 @@ def scrape_jobs(
     def scrape_site(site: Site) -> Tuple[str, JobResponse]:
         scraper_class = SCRAPER_MAPPING[site]
-        scraper = scraper_class(proxy=proxy)
+        scraper = scraper_class(proxies=proxies)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
         cap_name = site.value.capitalize()
         site_name = "ZipRecruiter" if cap_name == "Zip_recruiter" else cap_name
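Taken together, the two hunks above change the public surface: `scrape_jobs` now takes `proxies`, which may be a single proxy string or a list of proxy strings to rotate through. A minimal sketch of the new call shape; the site/search arguments and the proxy addresses are illustrative placeholders, not values taken from this diff:

from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],  # placeholder sites
    search_term="software engineer",   # placeholder query
    results_wanted=15,
    # a single "host:port" string also works; a list rotates per request
    proxies=["203.0.113.1:8080", "user:pass@203.0.113.2:8080"],
)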

View File

@@ -39,9 +39,9 @@ class ScraperInput(BaseModel):
 class Scraper(ABC):
-    def __init__(self, site: Site, proxy: list[str] | None = None):
+    def __init__(self, site: Site, proxies: list[str] | None = None):
+        self.proxies = proxies
         self.site = site
-        self.proxy = (lambda p: {"http": p, "https": p} if p else None)(proxy)

     @abstractmethod
     def scrape(self, scraper_input: ScraperInput) -> JobResponse: ...
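The base class now stores `proxies` verbatim instead of eagerly building a `{"http": ..., "https": ...}` dict from a single proxy; formatting and rotation move into the session layer (see the `utils` changes below). A rough sketch of a concrete scraper under this contract, using a hypothetical `ExampleScraper` with imports matching the package layout shown elsewhere in this diff:

from .. import Scraper, ScraperInput, Site
from ..utils import create_session
from ...jobs import JobResponse

class ExampleScraper(Scraper):
    def __init__(self, proxies: list[str] | str | None = None):
        super().__init__(Site.INDEED, proxies=proxies)
        # self.proxies is the raw value; the rotating session formats it per request
        self.session = create_session(proxies=self.proxies, is_tls=False)

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        return JobResponse(jobs=[])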

View File

@@ -34,12 +34,12 @@ from ...jobs import (
 class GlassdoorScraper(Scraper):
-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes GlassdoorScraper with the Glassdoor job search url
         """
         site = Site(Site.GLASSDOOR)
-        super().__init__(site, proxy=proxy)
+        super().__init__(site, proxies=proxies)

         self.base_url = None
         self.country = None
@@ -59,7 +59,7 @@ class GlassdoorScraper(Scraper):
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
         self.base_url = self.scraper_input.country.get_glassdoor_url()
-        self.session = create_session(self.proxy, is_tls=True, has_retry=True)
+        self.session = create_session(proxies=self.proxies, is_tls=True, has_retry=True)
         token = self._get_csrf_token()
         self.headers["gd-csrf-token"] = token if token else self.fallback_token
@@ -245,7 +245,6 @@ class GlassdoorScraper(Scraper):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        session = create_session(self.proxy, has_retry=True)
         res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:

View File

@@ -12,14 +12,13 @@ from typing import Tuple
 from datetime import datetime
 from concurrent.futures import ThreadPoolExecutor, Future
-import requests
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     extract_emails_from_text,
     get_enum_from_job_type,
     markdown_converter,
     logger,
+    create_session,
 )
 from ...jobs import (
     JobPost,
@@ -33,10 +32,13 @@ from ...jobs import (
 class IndeedScraper(Scraper):
-    def __init__(self, proxy: str | None = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes IndeedScraper with the Indeed API url
         """
+        super().__init__(Site.INDEED, proxies=proxies)
+        self.session = create_session(proxies=self.proxies, is_tls=False)
         self.scraper_input = None
         self.jobs_per_page = 100
         self.num_workers = 10
@@ -45,8 +47,6 @@ class IndeedScraper(Scraper):
         self.api_country_code = None
         self.base_url = None
         self.api_url = "https://apis.indeed.com/graphql"
-        site = Site(Site.INDEED)
-        super().__init__(site, proxy=proxy)

     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -90,13 +90,13 @@ class IndeedScraper(Scraper):
         jobs = []
         new_cursor = None
         filters = self._build_filters()
-        search_term = self.scraper_input.search_term.replace('"', '\\"') if self.scraper_input.search_term else ""
+        search_term = (
+            self.scraper_input.search_term.replace('"', '\\"')
+            if self.scraper_input.search_term
+            else ""
+        )
         query = self.job_search_query.format(
-            what=(
-                f'what: "{search_term}"'
-                if search_term
-                else ""
-            ),
+            what=(f'what: "{search_term}"' if search_term else ""),
             location=(
                 f'location: {{where: "{self.scraper_input.location}", radius: {self.scraper_input.distance}, radiusUnit: MILES}}'
                 if self.scraper_input.location
@@ -111,11 +111,10 @@ class IndeedScraper(Scraper):
         }
         api_headers = self.api_headers.copy()
         api_headers["indeed-co"] = self.api_country_code
-        response = requests.post(
+        response = self.session.post(
             self.api_url,
             headers=api_headers,
             json=payload,
-            proxies=self.proxy,
             timeout=10,
         )
         if response.status_code != 200:

View File

@@ -10,14 +10,13 @@ from __future__ import annotations
 import time
 import random
 import regex as re
-import urllib.parse
 from typing import Optional
 from datetime import datetime
 from threading import Lock
 from bs4.element import Tag
 from bs4 import BeautifulSoup
-from urllib.parse import urlparse, urlunparse
+from urllib.parse import urlparse, urlunparse, unquote

 from .. import Scraper, ScraperInput, Site
 from ..exceptions import LinkedInException
@@ -46,11 +45,19 @@ class LinkedInScraper(Scraper):
     band_delay = 4
     jobs_per_page = 25

-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
-        super().__init__(Site(Site.LINKEDIN), proxy=proxy)
+        super().__init__(Site.LINKEDIN, proxies=proxies)
+        self.session = create_session(
+            proxies=self.proxies,
+            is_tls=False,
+            has_retry=True,
+            delay=5,
+            clear_cookies=True,
+        )
+        self.session.headers.update(self.headers)
         self.scraper_input = None
         self.country = "worldwide"
         self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')
@@ -74,7 +81,6 @@ class LinkedInScraper(Scraper):
         )
         while continue_search():
             logger.info(f"LinkedIn search page: {page // 25 + 1}")
-            session = create_session(is_tls=False, has_retry=True, delay=5)
             params = {
                 "keywords": scraper_input.search_term,
                 "location": scraper_input.location,
@@ -99,12 +105,9 @@ class LinkedInScraper(Scraper):
             params = {k: v for k, v in params.items() if v is not None}
             try:
-                response = session.get(
+                response = self.session.get(
                     f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                     params=params,
-                    allow_redirects=True,
-                    proxies=self.proxy,
-                    headers=self.headers,
                     timeout=10,
                 )
                 if response.status_code not in range(200, 400):
@@ -241,10 +244,7 @@ class LinkedInScraper(Scraper):
         :return: dict
         """
         try:
-            session = create_session(is_tls=False, has_retry=True)
-            response = session.get(
-                job_page_url, headers=self.headers, timeout=5, proxies=self.proxy
-            )
+            response = self.session.get(job_page_url, timeout=5)
             response.raise_for_status()
         except:
             return {}
@@ -340,7 +340,7 @@ class LinkedInScraper(Scraper):
                 job_url_direct_content.decode_contents().strip()
             )
             if job_url_direct_match:
-                job_url_direct = urllib.parse.unquote(job_url_direct_match.group())
+                job_url_direct = unquote(job_url_direct_match.group())

         return job_url_direct

View File

@@ -2,6 +2,8 @@ from __future__ import annotations
 import re
 import logging
+from itertools import cycle
 import requests
 import tls_client
 import numpy as np
@@ -21,6 +23,104 @@ if not logger.handlers:
     logger.addHandler(console_handler)

+
+class RotatingProxySession:
+    def __init__(self, proxies=None):
+        if isinstance(proxies, str):
+            self.proxy_cycle = cycle([self.format_proxy(proxies)])
+        elif isinstance(proxies, list):
+            self.proxy_cycle = (
+                cycle([self.format_proxy(proxy) for proxy in proxies])
+                if proxies
+                else None
+            )
+        else:
+            self.proxy_cycle = None
+
+    @staticmethod
+    def format_proxy(proxy):
+        """Utility method to format a proxy string into a dictionary."""
+        if proxy.startswith("http://") or proxy.startswith("https://"):
+            return {"http": proxy, "https": proxy}
+        return {"http": f"http://{proxy}", "https": f"http://{proxy}"}
+
+
+class RequestsRotating(RotatingProxySession, requests.Session):
+    def __init__(self, proxies=None, has_retry=False, delay=1, clear_cookies=False):
+        RotatingProxySession.__init__(self, proxies=proxies)
+        requests.Session.__init__(self)
+        self.clear_cookies = clear_cookies
+        self.allow_redirects = True
+        self.setup_session(has_retry, delay)
+
+    def setup_session(self, has_retry, delay):
+        if has_retry:
+            retries = Retry(
+                total=3,
+                connect=3,
+                status=3,
+                status_forcelist=[500, 502, 503, 504, 429],
+                backoff_factor=delay,
+            )
+            adapter = HTTPAdapter(max_retries=retries)
+            self.mount("http://", adapter)
+            self.mount("https://", adapter)
+
+    def request(self, method, url, **kwargs):
+        if self.clear_cookies:
+            self.cookies.clear()
+
+        if self.proxy_cycle:
+            next_proxy = next(self.proxy_cycle)
+            if next_proxy["http"] != "http://localhost":
+                self.proxies = next_proxy
+            else:
+                self.proxies = {}
+        return requests.Session.request(self, method, url, **kwargs)
+
+
+class TLSRotating(RotatingProxySession, tls_client.Session):
+    def __init__(self, proxies=None):
+        RotatingProxySession.__init__(self, proxies=proxies)
+        tls_client.Session.__init__(self, random_tls_extension_order=True)
+
+    def execute_request(self, *args, **kwargs):
+        if self.proxy_cycle:
+            next_proxy = next(self.proxy_cycle)
+            if next_proxy["http"] != "http://localhost":
+                self.proxies = next_proxy
+            else:
+                self.proxies = {}
+        response = tls_client.Session.execute_request(self, *args, **kwargs)
+        return response
+
+
+def create_session(
+    *,
+    proxies: dict | str | None = None,
+    is_tls: bool = True,
+    has_retry: bool = False,
+    delay: int = 1,
+    clear_cookies: bool = False,
+) -> requests.Session:
+    """
+    Creates a requests session with optional tls, proxy, and retry settings.
+    :return: A session object
+    """
+    if is_tls:
+        session = TLSRotating(proxies=proxies)
+    else:
+        session = RequestsRotating(
+            proxies=proxies,
+            has_retry=has_retry,
+            delay=delay,
+            clear_cookies=clear_cookies,
+        )
+    return session
+
+
 def set_logger_level(verbose: int = 2):
     """
     Adjusts the logger's level. This function allows the logging level to be changed at runtime.
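The rotation above reduces to a normalize-then-cycle pattern: each proxy string is expanded into the mapping shape `requests` expects, and `itertools.cycle` hands out the next entry on every request. A self-contained sketch of that pattern, runnable outside the package (addresses are placeholders):

from itertools import cycle

def format_proxy(proxy: str) -> dict[str, str]:
    # mirrors RotatingProxySession.format_proxy: bare host:port strings get an
    # http:// scheme; already-schemed URLs are reused for both protocol keys
    if proxy.startswith(("http://", "https://")):
        return {"http": proxy, "https": proxy}
    return {"http": f"http://{proxy}", "https": f"http://{proxy}"}

proxies = ["203.0.113.1:8080", "https://user:pass@203.0.113.2:8443"]
proxy_cycle = cycle(format_proxy(p) for p in proxies)

for _ in range(3):
    print(next(proxy_cycle))  # third call wraps back to the first proxy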
@@ -52,39 +152,6 @@ def extract_emails_from_text(text: str) -> list[str] | None:
     return email_regex.findall(text)

-def create_session(
-    proxy: dict | None = None,
-    is_tls: bool = True,
-    has_retry: bool = False,
-    delay: int = 1,
-) -> requests.Session:
-    """
-    Creates a requests session with optional tls, proxy, and retry settings.
-    :return: A session object
-    """
-    if is_tls:
-        session = tls_client.Session(random_tls_extension_order=True)
-        session.proxies = proxy
-    else:
-        session = requests.Session()
-        session.allow_redirects = True
-        if proxy:
-            session.proxies.update(proxy)
-        if has_retry:
-            retries = Retry(
-                total=3,
-                connect=3,
-                status=3,
-                status_forcelist=[500, 502, 503, 504, 429],
-                backoff_factor=delay,
-            )
-            adapter = HTTPAdapter(max_retries=retries)
-            session.mount("http://", adapter)
-            session.mount("https://", adapter)
-    return session

 def get_enum_from_job_type(job_type_str: str) -> JobType | None:
     """
     Given a string, returns the corresponding JobType enum member if a match is found.
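With the old positional helper deleted, every call site goes through the keyword-only factory added above, so proxy handling is configured in exactly one place. A minimal usage sketch, assuming the package layout in this diff; the request URL is a placeholder:

from jobspy.scrapers.utils import create_session

session = create_session(
    proxies=["203.0.113.1:8080", "203.0.113.2:8080"],  # placeholder addresses
    is_tls=False,     # plain requests.Session subclass rather than tls_client
    has_retry=True,   # retry 429/5xx responses with backoff_factor=delay
    delay=2,
    clear_cookies=True,
)
response = session.get("https://httpbin.org/ip", timeout=10)
print(response.status_code)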

View File

@@ -36,14 +36,15 @@ class ZipRecruiterScraper(Scraper):
     base_url = "https://www.ziprecruiter.com"
     api_url = "https://api.ziprecruiter.com"

-    def __init__(self, proxy: Optional[str] = None):
+    def __init__(self, proxies: list[str] | str | None = None):
         """
         Initializes ZipRecruiterScraper with the ZipRecruiter job search url
         """
+        super().__init__(Site.ZIP_RECRUITER, proxies=proxies)
         self.scraper_input = None
-        self.session = create_session(proxy)
+        self.session = create_session(proxies=proxies)
         self._get_cookies()
-        super().__init__(Site.ZIP_RECRUITER, proxy=proxy)

         self.delay = 5
         self.jobs_per_page = 20
@@ -151,7 +152,7 @@ class ZipRecruiterScraper(Scraper):
         comp_max = int(job["compensation_max"]) if "compensation_max" in job else None
         comp_currency = job.get("compensation_currency")
         return JobPost(
-            id=str(job['listing_key']),
+            id=str(job["listing_key"]),
             title=title,
             company_name=company,
             location=location,