mirror of https://github.com/Bunsly/JobSpy
parent a4f6851c32
commit 0a669e9ba8

README.md

@@ -21,7 +21,7 @@ Updated for release v1.1.3
 ### Installation
 
 ```
-pip install python-jobspy
+pip install -U python-jobspy
 ```
 
 _Python version >= [3.10](https://www.python.org/downloads/release/python-3100/) required_
@@ -64,8 +64,8 @@ Required
 ├── site_type (List[enum]): linkedin, zip_recruiter, indeed, glassdoor
 └── search_term (str)
 Optional
-├── location (int)
-├── distance (int): in miles
+├── location (str)
+├── distance (int): in miles, default 50
 ├── job_type (enum): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port'
 ├── is_remote (bool)
@@ -76,7 +76,7 @@ Optional
 ├── description_format (enum): markdown, html (format type of the job descriptions)
 ├── country_indeed (enum): filters the country on Indeed (see below for correct spelling)
 ├── offset (num): starts the search from an offset (e.g. 25 will start the search from the 25th result)
-├── hours_old (int): filters jobs by the number of hours since the job was posted (all but LinkedIn rounds up to next day)
+├── hours_old (int): filters jobs by the number of hours since the job was posted (ZipRecruiter and Glassdoor round up to next day. If you use this on Indeed, it will not filter by job_type or is_remote)
 ```
 
 ### JobPost Schema
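For context, a call that exercises these parameters might look like the following. This is a minimal sketch, not part of the commit: values are illustrative, and `results_wanted` is one of the other optional parameters not shown in this hunk.

```python
from jobspy import scrape_jobs

# illustrative values; note the hours_old caveat above for Indeed
jobs = scrape_jobs(
    site_name=["indeed", "linkedin"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=20,
    hours_old=72,
    country_indeed="USA",
)
print(jobs.head())
```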
@@ -100,15 +100,26 @@ JobPost
 │ └── currency (enum)
 └── date_posted (date)
 └── emails (str)
-└── num_urgent_words (int)
 └── is_remote (bool)
 
+Indeed specific
+├── company_country (str)
+└── company_addresses (str)
+└── company_industry (str)
+└── company_employees_label (str)
+└── company_revenue_label (str)
+└── company_description (str)
+└── ceo_name (str)
+└── ceo_photo_url (str)
+└── logo_photo_url (str)
+└── banner_photo_url (str)
 ```
 
 ## Supported Countries for Job Searching
 
 ### **LinkedIn**
 
-LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we're using
+LinkedIn searches globally & uses only the `location` parameter. You can only fetch 1000 jobs max from the LinkedIn endpoint we are using
 
 ### **ZipRecruiter**
 
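The new Indeed-specific fields surface as extra columns on the returned DataFrame and stay empty for other boards. A hedged sketch of reading them, using the column names from the `desired_order` list later in this commit (the README labels above differ slightly):

```python
from jobspy import scrape_jobs

# hedged sketch: these columns are only populated for Indeed results
jobs = scrape_jobs(site_name=["indeed"], search_term="data engineer", country_indeed="USA", results_wanted=10)
print(jobs[["company", "company_industry", "company_num_employees", "company_revenue", "ceo_name"]].head())
```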
@@ -141,7 +152,11 @@ You can specify the following countries when searching on Indeed (use the exact
 | Venezuela | Vietnam* | | |
 
 
-Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
+## Notes
+* Indeed is the best scraper currently with no rate limiting.
+* Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
+* LinkedIn is the most restrictive and usually rate limits on around the 10th page
+* ZipRecruiter is okay but has a 5 second delay in between each page to avoid rate limiting.
 ## Frequently Asked Questions
 
 ---

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.47"
+version = "1.1.48"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"

@@ -3,6 +3,7 @@ from typing import Tuple
 from concurrent.futures import ThreadPoolExecutor, as_completed
 
 from .jobs import JobType, Location
+from .scrapers.utils import logger
 from .scrapers.indeed import IndeedScraper
 from .scrapers.ziprecruiter import ZipRecruiterScraper
 from .scrapers.glassdoor import GlassdoorScraper
@@ -20,7 +21,7 @@ def scrape_jobs(
     site_name: str | list[str] | Site | list[Site] | None = None,
     search_term: str | None = None,
     location: str | None = None,
-    distance: int | None = None,
+    distance: int | None = 50,
     is_remote: bool = False,
     job_type: str | None = None,
     easy_apply: bool | None = None,
@@ -92,6 +93,8 @@ def scrape_jobs(
         scraper_class = SCRAPER_MAPPING[site]
         scraper = scraper_class(proxy=proxy)
         scraped_data: JobResponse = scraper.scrape(scraper_input)
+        site_name = 'ZipRecruiter' if site.value.capitalize() == 'Zip_recruiter' else site.value.capitalize()
+        logger.info(f"{site_name} finished scraping")
         return site.value, scraped_data
 
     site_to_jobs_dict = {}
@@ -160,11 +163,11 @@ def scrape_jobs(
 
     # Desired column order
     desired_order = [
-        "job_url_hyper" if hyperlinks else "job_url",
         "site",
+        "job_url_hyper" if hyperlinks else "job_url",
+        "job_url_direct",
         "title",
         "company",
-        "company_url",
         "location",
         "job_type",
         "date_posted",
@@ -173,10 +176,20 @@ def scrape_jobs(
         "max_amount",
         "currency",
         "is_remote",
-        "num_urgent_words",
-        "benefits",
         "emails",
         "description",
+        "company_url",
+        "company_url_direct",
+        "company_addresses",
+        "company_industry",
+        "company_num_employees",
+        "company_revenue",
+        "company_description",
+        "logo_photo_url",
+        "banner_photo_url",
+        "ceo_name",
+        "ceo_photo_url",
     ]
 
     # Step 3: Ensure all desired columns are present, adding missing ones as empty
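The "Step 3" comment above refers to padding and reordering the combined DataFrame; a minimal standalone sketch of that idea (abbreviated column list and toy data, only to show the pad-then-reorder pattern):

```python
import pandas as pd

desired_order = ["site", "job_url", "title", "company", "location", "date_posted"]  # abbreviated
jobs_df = pd.DataFrame([{"site": "indeed", "title": "Data Engineer", "job_url": "https://example.com/job"}])
for column in desired_order:
    if column not in jobs_df.columns:
        jobs_df[column] = None          # add missing columns as empty
jobs_df = jobs_df[desired_order]        # enforce the desired order
```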

@@ -57,7 +57,7 @@ class JobType(Enum):
 class Country(Enum):
     """
     Gets the subdomain for Indeed and Glassdoor.
-    The second item in the tuple is the subdomain for Indeed
+    The second item in the tuple is the subdomain (and API country code if there's a ':' separator) for Indeed
     The third item in the tuple is the subdomain (and tld if there's a ':' separator) for Glassdoor
     """
 
@@ -118,8 +118,8 @@ class Country(Enum):
     TURKEY = ("turkey", "tr")
     UKRAINE = ("ukraine", "ua")
     UNITEDARABEMIRATES = ("united arab emirates", "ae")
-    UK = ("uk,united kingdom", "uk", "co.uk")
-    USA = ("usa,us,united states", "www", "com")
+    UK = ("uk,united kingdom", "uk:gb", "co.uk")
+    USA = ("usa,us,united states", "www:us", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
     VIETNAM = ("vietnam", "vn", "com")
@@ -132,7 +132,10 @@ class Country(Enum):
 
     @property
     def indeed_domain_value(self):
-        return self.value[1]
+        subdomain, _, api_country_code = self.value[1].partition(":")
+        if subdomain and api_country_code:
+            return subdomain, api_country_code.upper()
+        return self.value[1], self.value[1].upper()
 
     @property
     def glassdoor_domain_value(self):
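With this change `indeed_domain_value` always yields a `(subdomain, api_country_code)` pair; for entries without a `':'` both values derive from the same string. A small illustration (the import path is an assumption):

```python
from jobspy.jobs import Country  # assumed import path

subdomain, api_country_code = Country.USA.indeed_domain_value      # ("www", "US") via the ':' separator
subdomain, api_country_code = Country.UK.indeed_domain_value       # ("uk", "GB")
subdomain, api_country_code = Country.UKRAINE.indeed_domain_value  # ("ua", "UA"), no ':' so both come from "ua"
```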
@@ -163,7 +166,7 @@ class Country(Enum):
 
 
 class Location(BaseModel):
-    country: Country | None = None
+    country: Country | str | None = None
     city: Optional[str] = None
     state: Optional[str] = None
 
@@ -173,7 +176,9 @@ class Location(BaseModel):
             location_parts.append(self.city)
         if self.state:
             location_parts.append(self.state)
-        if self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
+        if isinstance(self.country, str):
+            location_parts.append(self.country)
+        elif self.country and self.country not in (Country.US_CANADA, Country.WORLDWIDE):
             country_name = self.country.value[0]
             if "," in country_name:
                 country_name = country_name.split(",")[0]
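The new `str` branch lets the Indeed scraper pass a raw country code straight through instead of a `Country` enum member. A hedged illustration (assumes the formatting method shown here is `Location.display_location` and that it joins the parts with ", "):

```python
from jobspy.jobs import Location  # assumed import path

# country passed as a plain string, as the Indeed scraper now does with countryCode
loc = Location(city="Austin", state="TX", country="US")
print(loc.display_location())  # expected: "Austin, TX, US"
```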
@@ -217,21 +222,31 @@ class DescriptionFormat(Enum):
 
 class JobPost(BaseModel):
     title: str
-    company_name: str
+    company_name: str | None
     job_url: str
+    job_url_direct: str | None = None
     location: Optional[Location]
 
     description: str | None = None
     company_url: str | None = None
+    company_url_direct: str | None = None
 
     job_type: list[JobType] | None = None
     compensation: Compensation | None = None
     date_posted: date | None = None
-    benefits: str | None = None
     emails: list[str] | None = None
-    num_urgent_words: int | None = None
     is_remote: bool | None = None
-    # company_industry: str | None = None
+    # indeed specific
+    company_addresses: str | None = None
+    company_industry: str | None = None
+    company_num_employees: str | None = None
+    company_revenue: str | None = None
+    company_description: str | None = None
+    ceo_name: str | None = None
+    ceo_photo_url: str | None = None
+    logo_photo_url: str | None = None
+    banner_photo_url: str | None = None
 
 
 class JobResponse(BaseModel):
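All of the new fields are optional with `None` defaults, so existing constructors keep working; a hedged construction sketch (import path assumed, values illustrative):

```python
from jobspy.jobs import JobPost  # assumed import path

post = JobPost(
    title="Data Engineer",
    company_name=None,                        # now allowed to be None
    job_url="https://www.indeed.com/viewjob?jk=example",
    location=None,
    company_industry="Software Development",  # new Indeed-specific fields default to None
    ceo_name=None,
)
```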

@@ -11,7 +11,7 @@ import requests
 from typing import Optional
 from datetime import datetime, timedelta
 from concurrent.futures import ThreadPoolExecutor, as_completed
-from ..utils import count_urgent_words, extract_emails_from_text
+from ..utils import extract_emails_from_text
 
 from .. import Scraper, ScraperInput, Site
 from ..exceptions import GlassdoorException
@@ -188,7 +188,6 @@ class GlassdoorScraper(Scraper):
             is_remote=is_remote,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )
 
     def _fetch_job_description(self, job_id):

@@ -4,21 +4,15 @@ jobspy.scrapers.indeed
 
 This module contains routines to scrape Indeed.
 """
-import re
 import math
-import json
-import requests
-from typing import Any
+from concurrent.futures import ThreadPoolExecutor, Future
 from datetime import datetime
 
-from bs4 import BeautifulSoup
-from bs4.element import Tag
-from concurrent.futures import ThreadPoolExecutor, Future
+import requests
 
+from .. import Scraper, ScraperInput, Site
 from ..utils import (
-    count_urgent_words,
     extract_emails_from_text,
-    create_session,
     get_enum_from_job_type,
     markdown_converter,
     logger
@@ -32,18 +26,19 @@ from ...jobs import (
     JobType,
     DescriptionFormat
 )
-from .. import Scraper, ScraperInput, Site
 
 
 class IndeedScraper(Scraper):
     def __init__(self, proxy: str | None = None):
         """
-        Initializes IndeedScraper with the Indeed job search url
+        Initializes IndeedScraper with the Indeed API url
         """
         self.scraper_input = None
-        self.jobs_per_page = 25
+        self.jobs_per_page = 100
         self.num_workers = 10
         self.seen_urls = set()
+        self.headers = None
+        self.api_country_code = None
         self.base_url = None
         self.api_url = "https://apis.indeed.com/graphql"
         site = Site(Site.INDEED)
@@ -56,278 +51,220 @@ class IndeedScraper(Scraper):
         :return: job_response
         """
         self.scraper_input = scraper_input
-        job_list = self._scrape_page()
-        pages_processed = 1
+        domain, self.api_country_code = self.scraper_input.country.indeed_domain_value
+        self.base_url = f"https://{domain}.indeed.com"
+        self.headers = self.api_headers.copy()
+        self.headers['indeed-co'] = self.scraper_input.country.indeed_domain_value
+        job_list = []
+        page = 1

+        cursor = None
+        offset_pages = math.ceil(self.scraper_input.offset / 100)
+        for _ in range(offset_pages):
+            logger.info(f'Indeed skipping search page: {page}')
+            __, cursor = self._scrape_page(cursor)
+            if not __:
+                logger.info(f'Indeed found no jobs on page: {page}')
+                break

         while len(self.seen_urls) < scraper_input.results_wanted:
-            pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
-            new_jobs = False
-            with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
-                futures: list[Future] = [
-                    executor.submit(self._scrape_page, page + pages_processed)
-                    for page in range(pages_to_process)
-                ]
+            logger.info(f'Indeed search page: {page}')
+            jobs, cursor = self._scrape_page(cursor)
+            if not jobs:
+                logger.info(f'Indeed found no jobs on page: {page}')
+                break

-                for future in futures:
-                    jobs = future.result()
-                    if jobs:
             job_list += jobs
-                        new_jobs = True
-                        if len(self.seen_urls) >= scraper_input.results_wanted:
-                            break
+            page += 1
+        return JobResponse(jobs=job_list[:scraper_input.results_wanted])

-            pages_processed += pages_to_process
-            if not new_jobs:
-                break
+    def _scrape_page(self, cursor: str | None) -> (list[JobPost], str | None):

-        if len(self.seen_urls) > scraper_input.results_wanted:
-            job_list = job_list[:scraper_input.results_wanted]

-        return JobResponse(jobs=job_list)

-    def _scrape_page(self, page: int=0) -> list[JobPost]:
         """
         Scrapes a page of Indeed for jobs with scraper_input criteria
-        :param page:
-        :return: jobs found on page, total number of jobs found for search
+        :param cursor:
+        :return: jobs found on page, next page cursor
         """
-        logger.info(f'Indeed search page: {page + 1}')
-        job_list = []
-        domain = self.scraper_input.country.indeed_domain_value
-        self.base_url = f"https://{domain}.indeed.com"
-        try:
-            session = create_session(self.proxy)
-            response = session.get(
-                f"{self.base_url}/m/jobs",
-                headers=self.headers,
-                params=self._add_params(page),
+        jobs = []
+        new_cursor = None
+        filters = self._build_filters()
+        query = self.job_search_query.format(
+            what=self.scraper_input.search_term,
+            location=self.scraper_input.location if self.scraper_input.location else self.scraper_input.country.value[0].split(',')[-1],
+            radius=self.scraper_input.distance,
+            dateOnIndeed=self.scraper_input.hours_old,
+            cursor=f'cursor: "{cursor}"' if cursor else '',
+            filters=filters
         )
-            if response.status_code not in range(200, 400):
-                if response.status_code == 429:
-                    logger.error(f'429 Response - Blocked by Indeed for too many requests')
-                else:
-                    logger.error(f'Indeed response status code {response.status_code}')
-                return job_list
-        except Exception as e:
-            if "Proxy responded with" in str(e):
-                logger.error(f'Indeed: Bad proxy')
-            else:
-                logger.error(f'Indeed: {str(e)}')
-            return job_list
+        payload = {
+            'query': query,
+        }
+        api_headers = self.api_headers.copy()
+        api_headers['indeed-co'] = self.api_country_code
+        response = requests.post(self.api_url, headers=api_headers, json=payload, proxies=self.proxy, timeout=10)
+        if response.status_code != 200:
+            logger.info(f'Indeed responded with status code: {response.status_code} (submit GitHub issue if this appears to be a beg)')
+            return jobs, new_cursor
+        data = response.json()
+        jobs = data['data']['jobSearch']['results']
+        new_cursor = data['data']['jobSearch']['pageInfo']['nextCursor']

-        soup = BeautifulSoup(response.content, "html.parser")
-        if "did not match any jobs" in response.text:
-            return job_list

-        jobs = IndeedScraper._parse_jobs(soup)
-        if not jobs:
-            return []
-        if (
-            not jobs.get("metaData", {})
-            .get("mosaicProviderJobCardsModel", {})
-            .get("results")
-        ):
-            logger.error("Indeed - No jobs found.")
-            return []

-        jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
-        job_keys = [job['jobkey'] for job in jobs]
-        jobs_detailed = self._get_job_details(job_keys)

         with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
             job_results: list[Future] = [
-                executor.submit(self._process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
+                executor.submit(self._process_job, job['job']) for job in jobs
             ]
             job_list = [result.result() for result in job_results if result.result()]
+        return job_list, new_cursor

-        return job_list
+    def _build_filters(self):
+        """
+        Builds the filters dict for job type/is_remote. If hours_old is provided, composite filter for job_type/is_remote is not possible.
+        IndeedApply: filters: { keyword: { field: "indeedApplyScope", keys: ["DESKTOP"] } }
+        """
+        filters_str = ""
+        if self.scraper_input.hours_old:
+            filters_str = """
+            filters: {{
+                date: {{
+                    field: "dateOnIndeed",
+                    start: "{start}h"
+                }}
+            }}
+            """.format(start=self.scraper_input.hours_old)
+        elif self.scraper_input.job_type or self.scraper_input.is_remote:
+            job_type_key_mapping = {
+                JobType.FULL_TIME: "CF3CP",
+                JobType.PART_TIME: "75GKK",
+                JobType.CONTRACT: "NJXCK",
+                JobType.INTERNSHIP: "VDTG7",
+            }

-    def _process_job(self, job: dict, job_detailed: dict) -> JobPost | None:
-        job_url = f'{self.base_url}/m/jobs/viewjob?jk={job["jobkey"]}'
-        job_url_client = f'{self.base_url}/viewjob?jk={job["jobkey"]}'
+            keys = []
+            if self.scraper_input.job_type:
+                key = job_type_key_mapping[self.scraper_input.job_type]
+                keys.append(key)

+            if self.scraper_input.is_remote:
+                keys.append("DSQF7")

+            if keys:
+                keys_str = '", "'.join(keys) # Prepare your keys string
+                filters_str = f"""
+                filters: {{
+                    composite: {{
+                        filters: [{{
+                            keyword: {{
+                                field: "attributes",
+                                keys: ["{keys_str}"]
+                            }}
+                        }}]
+                    }}
+                }}
+                """
+        return filters_str

+    def _process_job(self, job: dict) -> JobPost | None:
+        """
+        Parses the job dict into JobPost model
+        :param job: dict to parse
+        :return: JobPost if it's a new job
+        """
+        job_url = f'{self.base_url}/viewjob?jk={job["key"]}'
         if job_url in self.seen_urls:
-            return None
+            return
         self.seen_urls.add(job_url)
-        description = job_detailed['description']['html']
+        description = job['description']['html']
         description = markdown_converter(description) if self.scraper_input.description_format == DescriptionFormat.MARKDOWN else description
-        job_type = self._get_job_type(job)
-        timestamp_seconds = job["pubDate"] / 1000
-        date_posted = datetime.fromtimestamp(timestamp_seconds)
-        date_posted = date_posted.strftime("%Y-%m-%d")
+        job_type = self._get_job_type(job['attributes'])
+        timestamp_seconds = job["datePublished"] / 1000
+        date_posted = datetime.fromtimestamp(timestamp_seconds).strftime("%Y-%m-%d")
+        employer = job['employer'].get('dossier') if job['employer'] else None
+        employer_details = employer.get('employerDetails', {}) if employer else {}
         return JobPost(
-            title=job["normTitle"],
+            title=job["title"],
             description=description,
-            company_name=job["company"],
-            company_url=f"{self.base_url}{job_detailed['employer']['relativeCompanyPageUrl']}" if job_detailed[
+            company_name=job['employer'].get("name") if job.get('employer') else None,
+            company_url=f"{self.base_url}{job['employer']['relativeCompanyPageUrl']}" if job[
                 'employer'] else None,
+            company_url_direct=employer['links']['corporateWebsite'] if employer else None,

             location=Location(
-                city=job.get("jobLocationCity"),
-                state=job.get("jobLocationState"),
-                country=self.scraper_input.country,
+                city=job.get("location", {}).get("city"),
+                state=job.get("location", {}).get("admin1Code"),
+                country=job.get("location", {}).get("countryCode"),
             ),
             job_type=job_type,
-            compensation=self._get_compensation(job, job_detailed),
+            compensation=self._get_compensation(job),
             date_posted=date_posted,
-            job_url=job_url_client,
+            job_url=job_url,
+            job_url_direct=job['recruit'].get('viewJobUrl') if job.get('recruit') else None,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
-            is_remote=self._is_job_remote(job, job_detailed, description)
+            is_remote=self._is_job_remote(job, description),
+            company_addresses=employer_details['addresses'][0] if employer_details.get('addresses') else None,
+            company_industry=employer_details['industry'].replace('Iv1', '').replace('_', ' ').title() if employer_details.get('industry') else None,
+            company_num_employees=employer_details.get('employeesLocalizedLabel'),
+            company_revenue=employer_details.get('revenueLocalizedLabel'),
+            company_description=employer_details.get('briefDescription'),
+            ceo_name=employer_details.get('ceoName'),
+            ceo_photo_url=employer_details.get('ceoPhotoUrl'),

+            logo_photo_url=employer['images'].get('squareLogoUrl') if employer and employer.get('images') else None,
+            banner_photo_url=employer['images'].get('headerImageUrl') if employer and employer.get('images') else None,
         )

-    def _get_job_details(self, job_keys: list[str]) -> dict:
-        """
-        Queries the GraphQL endpoint for detailed job information for the given job keys.
-        """
-        job_keys_gql = '[' + ', '.join(f'"{key}"' for key in job_keys) + ']'
-        payload = dict(self.api_payload)
-        payload["query"] = self.api_payload["query"].format(job_keys_gql=job_keys_gql)
-        response = requests.post(self.api_url, headers=self.api_headers, json=payload, proxies=self.proxy)
-        if response.status_code == 200:
-            return response.json()['data']['jobData']['results']
-        else:
-            return {}

-    def _add_params(self, page: int) -> dict[str, str | Any]:
-        fromage = max(self.scraper_input.hours_old // 24, 1) if self.scraper_input.hours_old else None
-        params = {
-            "q": self.scraper_input.search_term,
-            "l": self.scraper_input.location if self.scraper_input.location else self.scraper_input.country.value[0].split(',')[-1],
-            "filter": 0,
-            "start": self.scraper_input.offset + page * 10,
-            "sort": "date",
-            "fromage": fromage,
-        }
-        if self.scraper_input.distance:
-            params["radius"] = self.scraper_input.distance

-        sc_values = []
-        if self.scraper_input.is_remote:
-            sc_values.append("attr(DSQF7)")
-        if self.scraper_input.job_type:
-            sc_values.append("jt({})".format(self.scraper_input.job_type.value[0]))

-        if sc_values:
-            params["sc"] = "0kf:" + "".join(sc_values) + ";"

-        if self.scraper_input.easy_apply:
-            params['iafilter'] = 1

-        return params

     @staticmethod
-    def _get_job_type(job: dict) -> list[JobType] | None:
+    def _get_job_type(attributes: list) -> list[JobType]:
         """
-        Parses the job to get list of job types
-        :param job:
-        :return:
+        Parses the attributes to get list of job types
+        :param attributes:
+        :return: list of JobType
         """
         job_types: list[JobType] = []
-        for taxonomy in job["taxonomyAttributes"]:
-            if taxonomy["label"] == "job-types":
-                for i in range(len(taxonomy["attributes"])):
-                    label = taxonomy["attributes"][i].get("label")
-                    if label:
-                        job_type_str = label.replace("-", "").replace(" ", "").lower()
+        for attribute in attributes:
+            job_type_str = attribute['label'].replace("-", "").replace(" ", "").lower()
             job_type = get_enum_from_job_type(job_type_str)
             if job_type:
                 job_types.append(job_type)
         return job_types

     @staticmethod
-    def _get_compensation(job: dict, job_detailed: dict) -> Compensation:
+    def _get_compensation(job: dict) -> Compensation | None:
         """
-        Parses the job to get
+        Parses the job to get compensation
+        :param job:
         :param job:
-        :param job_detailed:
         :return: compensation object
         """
-        comp = job_detailed['compensation']['baseSalary']
+        comp = job['compensation']['baseSalary']
         if comp:
-            interval = IndeedScraper._get_correct_interval(comp['unitOfWork'])
+            interval = IndeedScraper._get_compensation_interval(comp['unitOfWork'])
             if interval:
                 return Compensation(
                     interval=interval,
                     min_amount=round(comp['range'].get('min'), 2) if comp['range'].get('min') is not None else None,
                     max_amount=round(comp['range'].get('max'), 2) if comp['range'].get('max') is not None else None,
-                    currency=job_detailed['compensation']['currencyCode']
+                    currency=job['compensation']['currencyCode']
                 )

-        extracted_salary = job.get("extractedSalary")
-        compensation = None
-        if extracted_salary:
-            salary_snippet = job.get("salarySnippet")
-            currency = salary_snippet.get("currency") if salary_snippet else None
-            interval = (extracted_salary.get("type"),)
-            if isinstance(interval, tuple):
-                interval = interval[0]

-            interval = interval.upper()
-            if interval in CompensationInterval.__members__:
-                compensation = Compensation(
-                    interval=CompensationInterval[interval],
-                    min_amount=int(extracted_salary.get("min")),
-                    max_amount=int(extracted_salary.get("max")),
-                    currency=currency,
-                )
-        return compensation

     @staticmethod
-    def _parse_jobs(soup: BeautifulSoup) -> dict:
+    def _is_job_remote(job: dict, description: str) -> bool:
         """
-        Parses the jobs from the soup object
-        :param soup:
-        :return: jobs
+        Searches the description, location, and attributes to check if job is remote
         """
-        def find_mosaic_script() -> Tag | None:
-            script_tags = soup.find_all("script")

-            for tag in script_tags:
-                if (
-                    tag.string
-                    and "mosaic.providerData" in tag.string
-                    and "mosaic-provider-jobcards" in tag.string
-                ):
-                    return tag
-            return None

-        script_tag = find_mosaic_script()
-        if script_tag:
-            script_str = script_tag.string
-            pattern = r'window.mosaic.providerData\["mosaic-provider-jobcards"\]\s*=\s*({.*?});'
-            p = re.compile(pattern, re.DOTALL)
-            m = p.search(script_str)
-            if m:
-                jobs = json.loads(m.group(1).strip())
-                return jobs
-            else:
-                logger.warning(f'Indeed: Could not find mosaic provider job cards data')
-                return {}
-        else:
-            logger.warning(f"Indeed: Could not parse any jobs on the page")
-            return {}

-    @staticmethod
-    def _is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
         remote_keywords = ['remote', 'work from home', 'wfh']
         is_remote_in_attributes = any(
             any(keyword in attr['label'].lower() for keyword in remote_keywords)
-            for attr in job_detailed['attributes']
+            for attr in job['attributes']
         )
         is_remote_in_description = any(keyword in description.lower() for keyword in remote_keywords)
         is_remote_in_location = any(
-            keyword in job_detailed['location']['formatted']['long'].lower()
+            keyword in job['location']['formatted']['long'].lower()
             for keyword in remote_keywords
         )
-        is_remote_in_taxonomy = any(
-            taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0
-            for taxonomy in job.get("taxonomyAttributes", [])
-        )
-        return is_remote_in_attributes or is_remote_in_description or is_remote_in_location or is_remote_in_taxonomy
+        return is_remote_in_attributes or is_remote_in_description or is_remote_in_location

     @staticmethod
-    def _get_correct_interval(interval: str) -> CompensationInterval:
+    def _get_compensation_interval(interval: str) -> CompensationInterval:
         interval_mapping = {
             "DAY": "DAILY",
             "YEAR": "YEARLY",
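The rewritten `scrape` loop above replaces numbered pages with cursor pagination: whole pages are skipped to honor `offset`, then requests continue until `results_wanted` is met or a page comes back empty. A stripped-down sketch of that control flow (simplified names, not the actual class):

```python
import math

def paginate(fetch_page, results_wanted: int, offset: int = 0) -> list:
    """Sketch of the cursor loop: fetch_page(cursor) -> (jobs, next_cursor)."""
    job_list, cursor = [], None
    for _ in range(math.ceil(offset / 100)):   # skip whole pages covered by the offset
        page, cursor = fetch_page(cursor)
        if not page:
            return job_list
    while len(job_list) < results_wanted:
        page, cursor = fetch_page(cursor)
        if not page:                           # an empty page means no more results
            break
        job_list += page
    return job_list[:results_wanted]
```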
@@ -341,16 +278,6 @@ class IndeedScraper(Scraper):
         else:
             raise ValueError(f"Unsupported interval: {interval}")
 
-    headers = {
-        'Host': 'www.indeed.com',
-        'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
-        'sec-fetch-site': 'same-origin',
-        'sec-fetch-dest': 'document',
-        'accept-language': 'en-US,en;q=0.9',
-        'sec-fetch-mode': 'navigate',
-        'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 192.0',
-        'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
-    }
     api_headers = {
         'Host': 'apis.indeed.com',
         'content-type': 'application/json',
@@ -360,24 +287,35 @@ class IndeedScraper(Scraper):
         'accept-language': 'en-US,en;q=0.9',
         'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1',
         'indeed-app-info': 'appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone',
-        'indeed-co': 'US',
     }
-    api_payload = {
-        "query": """
+    job_search_query = """
         query GetJobData {{
-            jobData(input: {{
-                jobKeys: {job_keys_gql}
-            }}) {{
+            jobSearch(
+                what: "{what}"
+                location: {{ where: "{location}", radius: {radius}, radiusUnit: MILES }}
+                includeSponsoredResults: NONE
+                limit: 100
+                sort: DATE
+                {cursor}
+                {filters}
+            ) {{
+                pageInfo {{
+                    nextCursor
+                }}
                 results {{
+                    trackingKey
                     job {{
                         key
                         title
+                        datePublished
+                        dateOnIndeed
                         description {{
                             html
                         }}
                         location {{
                             countryName
                             countryCode
+                            admin1Code
                             city
                             postalCode
                             streetAddress
@@ -399,10 +337,30 @@ class IndeedScraper(Scraper):
                             currencyCode
                         }}
                         attributes {{
+                            key
                             label
                         }}
                         employer {{
                             relativeCompanyPageUrl
+                            name
+                            dossier {{
+                                employerDetails {{
+                                    addresses
+                                    industry
+                                    employeesLocalizedLabel
+                                    revenueLocalizedLabel
+                                    briefDescription
+                                    ceoName
+                                    ceoPhotoUrl
+                                }}
+                                images {{
+                                    headerImageUrl
+                                    squareLogoUrl
+                                }}
+                                links {{
+                                    corporateWebsite
+                                }}
+                            }}
                         }}
                         recruit {{
                             viewJobUrl
@@ -414,4 +372,3 @@ class IndeedScraper(Scraper):
             }}
         }}
     """
-    }
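To see how the `job_search_query` template above is consumed, here is a hedged sketch mirroring the `.format(...)` call in `_scrape_page` earlier in this diff. The class attributes are the ones defined in this commit, the import path is assumed, and the request itself may well be rejected without the right app headers or from a blocked IP:

```python
import requests
from jobspy.scrapers.indeed import IndeedScraper  # assumed import path

query = IndeedScraper.job_search_query.format(
    what="software engineer",
    location="Dallas, TX",
    radius=50,
    dateOnIndeed=72,   # passed by _scrape_page even if the template does not use it
    cursor="",
    filters="",
)
headers = {**IndeedScraper.api_headers, "indeed-co": "US"}
resp = requests.post("https://apis.indeed.com/graphql", headers=headers, json={"query": query}, timeout=10)
search = resp.json()["data"]["jobSearch"]
jobs, next_cursor = search["results"], search["pageInfo"]["nextCursor"]
```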

@@ -28,7 +28,6 @@ from ...jobs import (
 )
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     get_enum_from_job_type,
     currency_parser,
@@ -187,7 +186,6 @@ class LinkedInScraper(Scraper):
         except:
             date_posted = None
         benefits_tag = job_card.find("span", class_="result-benefits__text")
-        benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
         if full_descr:
             description, job_type = self._get_job_description(job_url)
 
@@ -199,11 +197,9 @@ class LinkedInScraper(Scraper):
             date_posted=date_posted,
             job_url=job_url,
             compensation=compensation,
-            benefits=benefits,
             job_type=job_type,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )
 
     def _get_job_description(

@@ -19,20 +19,6 @@ if not logger.handlers:
     logger.addHandler(console_handler)
 
 
-def count_urgent_words(description: str) -> int:
-    """
-    Count the number of urgent words or phrases in a job description.
-    """
-    urgent_patterns = re.compile(
-        r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
-        re.IGNORECASE,
-    )
-    matches = re.findall(urgent_patterns, description)
-    count = len(matches)
-
-    return count
-
-
 def markdown_converter(description_html: str):
     if description_html is None:
         return None
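With `count_urgent_words` removed, `num_urgent_words` disappears from the output; anyone who still wants that signal can post-process descriptions themselves. A small sketch reusing the same regex that was deleted above:

```python
import re

URGENT_PATTERNS = re.compile(
    r"\burgen(t|cy)|\bimmediate(ly)?\b|start asap|\bhiring (now|immediate(ly)?)\b",
    re.IGNORECASE,
)

def count_urgent_words(description: str) -> int:
    """Same logic as the removed helper: count urgency phrases in a job description."""
    return len(re.findall(URGENT_PATTERNS, description))
```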

@@ -14,7 +14,6 @@ from concurrent.futures import ThreadPoolExecutor
 from .. import Scraper, ScraperInput, Site
 from ..utils import (
     logger,
-    count_urgent_words,
     extract_emails_from_text,
     create_session,
     markdown_converter
@@ -161,7 +160,6 @@ class ZipRecruiterScraper(Scraper):
             job_url=job_url,
             description=description,
             emails=extract_emails_from_text(description) if description else None,
-            num_urgent_words=count_urgent_words(description) if description else None,
         )
 
     def _get_cookies(self):