Add czech to Indeed (#72)

Branch: pull/77/head · Tag: v.1.1.30
Augusto Gunsch, 2023-12-02 09:42:54 +01:00, committed by GitHub
parent 6587e464fa
commit 33d442bf1e
6 changed files with 53 additions and 60 deletions

File: pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.29"
+version = "1.1.30"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"

File: src/jobspy/jobs/__init__.py

@@ -55,18 +55,24 @@ class JobType(Enum):
 class Country(Enum):
-    ARGENTINA = ("argentina", "com.ar")
+    """
+    Gets the subdomain for Indeed and Glassdoor.
+    The second item in the tuple is the subdomain for Indeed
+    The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
+    """
+
+    ARGENTINA = ("argentina", "ar", "com.ar")
     AUSTRALIA = ("australia", "au", "com.au")
     AUSTRIA = ("austria", "at", "at")
     BAHRAIN = ("bahrain", "bh")
-    BELGIUM = ("belgium", "be", "nl:be")
+    BELGIUM = ("belgium", "be", "fr:be")
     BRAZIL = ("brazil", "br", "com.br")
     CANADA = ("canada", "ca", "ca")
     CHILE = ("chile", "cl")
     CHINA = ("china", "cn")
     COLOMBIA = ("colombia", "co")
     COSTARICA = ("costa rica", "cr")
-    CZECHREPUBLIC = ("czech republic", "cz")
+    CZECHREPUBLIC = ("czech republic,czechia", "cz")
     DENMARK = ("denmark", "dk")
     ECUADOR = ("ecuador", "ec")
     EGYPT = ("egypt", "eg")
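
The new docstring describes the tuple layout but not how it is consumed. A minimal sketch of the resolution it implies — the helper name and URL patterns here are illustrative assumptions, not code from this commit:

    # Hypothetical helper: resolves the Glassdoor domain encoded in value[2].
    def glassdoor_domain(country: Country) -> str:
        encoded = country.value[2]                  # e.g. "fr:be" for BELGIUM
        if ":" in encoded:
            subdomain, tld = encoded.split(":", 1)  # "fr:be" -> ("fr", "be")
            return f"{subdomain}.glassdoor.{tld}"   # -> "fr.glassdoor.be"
        return f"www.glassdoor.{encoded}"           # e.g. "www.glassdoor.com.au"

The Indeed subdomain in value[1] would be used the same way (e.g. "cz" -> cz.indeed.com).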
@@ -112,8 +118,8 @@ class Country(Enum):
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk", "uk", "co.uk")
-    USA = ("usa", "www", "com")
+    UK = ("uk,united kingdom", "uk", "co.uk")
+    USA = ("usa,us,united states", "www", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
     VIETNAM = ("vietnam", "vn")
@@ -147,7 +153,8 @@ class Country(Enum):
         """Convert a string to the corresponding Country enum."""
         country_str = country_str.strip().lower()
         for country in cls:
-            if country.value[0] == country_str:
+            country_names = country.value[0].split(',')
+            if country_str in country_names:
                 return country
         valid_countries = [country.value for country in cls]
         raise ValueError(
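
With the comma-separated aliases above, from_string now accepts any of the listed names for a country. For instance, given the members in this diff:

    Country.from_string("czechia")          # Country.CZECHREPUBLIC
    Country.from_string("United Kingdom")   # Country.UK (input is lowercased first)
    Country.from_string("us")               # Country.USA
    Country.from_string("narnia")           # raises ValueError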
@@ -167,10 +174,13 @@ class Location(BaseModel):
         if self.state:
             location_parts.append(self.state)
         if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
-            if self.country.value[0] in ("usa", "uk"):
-                location_parts.append(self.country.value[0].upper())
+            country_name = self.country.value[0]
+            if "," in country_name:
+                country_name = country_name.split(",")[0]
+            if country_name in ("usa", "uk"):
+                location_parts.append(country_name.upper())
             else:
-                location_parts.append(self.country.value[0].title())
+                location_parts.append(country_name.title())
         return ", ".join(location_parts)
@@ -181,6 +191,10 @@ class CompensationInterval(Enum):
     DAILY = "daily"
     HOURLY = "hourly"
 
+    @classmethod
+    def get_interval(cls, pay_period):
+        return cls[pay_period].value if pay_period in cls.__members__ else None
+
 
 class Compensation(BaseModel):
     interval: Optional[CompensationInterval] = None
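
Note that get_interval looks the argument up by member name and returns the member's string value, not the enum member itself (Pydantic can coerce that string back into a CompensationInterval on the Compensation model). This is also why the Glassdoor scraper below keeps a special case for "ANNUAL", which has no member of that name:

    CompensationInterval.get_interval("MONTHLY")  # "monthly"
    CompensationInterval.get_interval("ANNUAL")   # None -- the member is named YEARLY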

File: src/jobspy/scrapers/glassdoor/__init__.py

@@ -4,17 +4,13 @@ jobspy.scrapers.glassdoor
 
 This module contains routines to scrape Glassdoor.
 """
-import math
-import time
-import re
 import json
-from datetime import datetime, date
-from typing import Optional, Tuple, Any
-from bs4 import BeautifulSoup
+from typing import Optional, Any
+from datetime import datetime, timedelta
 
 from .. import Scraper, ScraperInput, Site
 from ..exceptions import GlassdoorException
-from ..utils import count_urgent_words, extract_emails_from_text, create_session
+from ..utils import create_session
 from ...jobs import (
     JobPost,
     Compensation,
@@ -22,7 +18,6 @@ from ...jobs import (
     Location,
     JobResponse,
     JobType,
-    Country,
 )
@@ -49,9 +44,6 @@ class GlassdoorScraper(Scraper):
     ) -> (list[JobPost], str | None):
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
-        :param scraper_input:
-        :return: jobs found on page
-        :return: cursor for next page
         """
         try:
             payload = self.add_payload(
@@ -86,8 +78,9 @@ class GlassdoorScraper(Scraper):
             company_name = job["header"]["employerNameFromSearch"]
             location_name = job["header"].get("locationName", "")
             location_type = job["header"].get("locationType", "")
-            is_remote = False
-            location = None
+            age_in_days = job["header"].get("ageInDays")
+            is_remote, location = False, None
+            date_posted = (datetime.now() - timedelta(days=age_in_days)).date() if age_in_days else None
 
             if location_type == "S":
                 is_remote = True
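
ageInDays comes straight from the Glassdoor job header, so the posted date is simply today minus that many days. One edge case: because the guard is a truthiness check, an ageInDays of 0 (posted today) also yields None rather than today's date:

    from datetime import datetime, timedelta

    age_in_days = 3   # scraped on 2023-12-02, say
    (datetime.now() - timedelta(days=age_in_days)).date()   # date(2023, 11, 29)
    # age_in_days = 0 or None  ->  date_posted is None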
@@ -99,10 +92,11 @@ class GlassdoorScraper(Scraper):
             job = JobPost(
                 title=title,
                 company_name=company_name,
+                date_posted=date_posted,
                 job_url=job_url,
                 location=location,
                 compensation=compensation,
-                is_remote=is_remote,
+                is_remote=is_remote
             )
             jobs.append(job)
@@ -161,15 +155,8 @@ class GlassdoorScraper(Scraper):
         interval = None
         if pay_period == "ANNUAL":
             interval = CompensationInterval.YEARLY
-        elif pay_period == "MONTHLY":
-            interval = CompensationInterval.MONTHLY
-        elif pay_period == "WEEKLY":
-            interval = CompensationInterval.WEEKLY
-        elif pay_period == "DAILY":
-            interval = CompensationInterval.DAILY
-        elif pay_period == "HOURLY":
-            interval = CompensationInterval.HOURLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)
 
         min_amount = int(adjusted_pay.get("p10") // 1)
         max_amount = int(adjusted_pay.get("p90") // 1)
@@ -180,12 +167,6 @@ class GlassdoorScraper(Scraper):
             currency=currency,
         )
 
-    def get_job_type_enum(self, job_type_str: str) -> list[JobType] | None:
-        for job_type in JobType:
-            if job_type_str in job_type.value:
-                return [job_type]
-        return None
-
     def get_location(self, location: str, is_remote: bool) -> (int, str):
         if not location or is_remote:
             return "11047", "STATE"  # remote options
@@ -243,10 +224,17 @@ class GlassdoorScraper(Scraper):
             payload["variables"]["filterParams"].append(
                 {"filterKey": "jobType", "values": filter_value}
             )
         return json.dumps([payload])
 
-    def parse_location(self, location_name: str) -> Location:
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+        return None
+
+    @staticmethod
+    def parse_location(location_name: str) -> Location:
         if not location_name or location_name == "Remote":
             return None
         city, _, state = location_name.partition(", ")
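
Usage of the now-static parse_location, per the partition logic shown (the Location construction in the elided body is assumed):

    GlassdoorScraper.parse_location("Berlin, BE")   # Location(city="Berlin", state="BE")
    GlassdoorScraper.parse_location("Remote")       # None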

File: src/jobspy/scrapers/indeed/__init__.py

@@ -64,6 +64,7 @@ class IndeedScraper(Scraper):
             "l": scraper_input.location,
             "filter": 0,
             "start": scraper_input.offset + page * 10,
+            "sort": "date"
         }
         if scraper_input.distance:
             params["radius"] = scraper_input.distance
@@ -150,6 +151,7 @@ class IndeedScraper(Scraper):
                 title=job["normTitle"],
                 description=description,
                 company_name=job["company"],
+                company_url=self.url + job["companyOverviewLink"] if "companyOverviewLink" in job else None,
                 location=Location(
                     city=job.get("jobLocationCity"),
                     state=job.get("jobLocationState"),
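
companyOverviewLink is a relative path in Indeed's job card JSON, so it is prefixed with the scraper's base URL; jobs without the key get company_url=None. Illustrative values (the path shown is hypothetical):

    job = {"companyOverviewLink": "/cmp/Example-Corp"}
    # with self.url == "https://www.indeed.com":
    # company_url -> "https://www.indeed.com/cmp/Example-Corp"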
@@ -305,7 +307,7 @@ class IndeedScraper(Scraper):
                 raise IndeedException("Could not find mosaic provider job cards data")
         else:
             raise IndeedException(
-                "Could not find a script tag containing mosaic provider data"
+                "Could not find any results for the search"
             )
 
     @staticmethod

File: src/jobspy/scrapers/utils.py

@@ -39,12 +39,6 @@ def create_session(proxy: dict | None = None, is_tls: bool = True):
             random_tls_extension_order=True,
         )
         session.proxies = proxy
-        # TODO multiple proxies
-        # if self.proxies:
-        #     session.proxies = {
-        #         "http": random.choice(self.proxies),
-        #         "https": random.choice(self.proxies),
-        #     }
     else:
         session = requests.Session()
         session.allow_redirects = True

File: src/jobspy/scrapers/ziprecruiter/__init__.py

@@ -44,12 +44,12 @@ class ZipRecruiterScraper(Scraper):
         if continue_token:
             params["continue"] = continue_token
         try:
-            session = create_session(self.proxy, is_tls=False)
+            session = create_session(self.proxy, is_tls=True)
             response = session.get(
                 f"https://api.ziprecruiter.com/jobs-app/jobs",
                 headers=self.headers(),
                 params=self.add_params(scraper_input),
-                timeout=10,
+                timeout_seconds=10,
             )
             if response.status_code != 200:
                 raise ZipRecruiterException(
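
These two changes travel together: with is_tls=True, create_session returns a tls_client session rather than a plain requests.Session, and tls_client names its timeout parameter timeout_seconds where requests uses timeout. A minimal sketch of the distinction, assuming the tls_client package that create_session wraps:

    import requests
    import tls_client

    requests.Session().get(url, timeout=10)             # requests API
    tls_client.Session().get(url, timeout_seconds=10)   # tls_client API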
@@ -195,17 +195,12 @@ class ZipRecruiterScraper(Scraper):
     @staticmethod
     def headers() -> dict:
         """
-        Returns headers needed for requests
+        Returns headers needed for ZipRecruiter API requests
         :return: dict - Dictionary containing headers
         """
         return {
-            "Host": "api.ziprecruiter.com",
-            "Cookie": "ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; SplitSV=2016-10-19%3AU2FsdGVkX19f9%2Bx70knxc%2FeR3xXR8lWoTcYfq5QjmLU%3D%0A; __cf_bm=qXim3DtLPbOL83GIp.ddQEOFVFTc1OBGPckiHYxcz3o-1698521532-0-AfUOCkgCZyVbiW1ziUwyefCfzNrJJTTKPYnif1FZGQkT60dMowmSU/Y/lP+WiygkFPW/KbYJmyc+MQSkkad5YygYaARflaRj51abnD+SyF9V; zglobalid=68d49bd5-0326-428e-aba8-8a04b64bc67c.af2d99ff7c03.653d61bb; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38",
-            "accept": "*/*",
-            "x-zr-zva-override": "100000000;vid:ZT1huzm_EQlDTVEc",
-            "x-pushnotificationid": "0ff4983d38d7fc5b3370297f2bcffcf4b3321c418f5c22dd152a0264707602a0",
-            "x-deviceid": "D77B3A92-E589-46A4-8A39-6EF6F1D86006",
-            "user-agent": "Job Search/87.0 (iPhone; CPU iOS 16_6_1 like Mac OS X)",
-            "authorization": "Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==",
-            "accept-language": "en-US,en;q=0.9",
+            'Host': 'api.ziprecruiter.com',
+            'accept': '*/*',
+            'authorization': 'Basic YTBlZjMyZDYtN2I0Yy00MWVkLWEyODMtYTI1NDAzMzI0YTcyOg==',
+            'Cookie': '__cf_bm=DZ7eJOw6lka.Bwy5jLeDqWanaZ8BJlVAwaXrmcbYnxM-1701505132-0-AfGaVIfTA2kJlmleK14o722vbVwpZ+4UxFznsWv+guvzXSpD9KVEy/+pNzvEZUx88yaEShJwGt3/EVjhHirX/ASustKxg47V/aXRd2XIO2QN; zglobalid=61f94830-1990-4130-b222-d9d0e09c7825.57da9ea9581c.656ae86b; ziprecruiter_browser=018188e0-045b-4ad7-aa50-627a6c3d43aa; ziprecruiter_session=5259b2219bf95b6d2299a1417424bc2edc9f4b38; zva=100000000%3Bvid%3AZWroa0x_F1KEeGeU'
         }