Mirror of https://github.com/Bunsly/JobSpy.git, synced 2026-03-05 03:54:31 -08:00
Compare commits
2 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | a4f6851c32 |  |
|  | db01bc6bbb |  |
README.md (17 changes)
@@ -104,15 +104,6 @@ JobPost
 └── is_remote (bool)
 ```
 
-### Exceptions
-
-The following exceptions may be raised when using JobSpy:
-
-* `LinkedInException`
-* `IndeedException`
-* `ZipRecruiterException`
-* `GlassdoorException`
-
 ## Supported Countries for Job Searching
 
 ### **LinkedIn**
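Although this hunk drops the exception list from the README, the exception classes themselves survive in the code (the Indeed diff below still imports from `..exceptions`). A minimal, hedged sketch of catching them — the `jobspy.scrapers.exceptions` import path and the `scrape_jobs` call shape are assumptions based on the package layout visible in this diff:

```python
from jobspy import scrape_jobs
from jobspy.scrapers.exceptions import (  # import path assumed
    LinkedInException,
    IndeedException,
    ZipRecruiterException,
    GlassdoorException,
)

try:
    jobs = scrape_jobs(site_name=["indeed"], search_term="software engineer")
except (LinkedInException, IndeedException,
        ZipRecruiterException, GlassdoorException) as e:
    # Each scraper raises its own site-specific exception type.
    print(f"Scrape failed: {e}")
```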
@@ -147,7 +138,7 @@ You can specify the following countries when searching on Indeed (use the exact
 | South Korea | Spain* | Sweden | Switzerland* |
 | Taiwan | Thailand | Turkey | Ukraine |
 | United Arab Emirates | UK* | USA* | Uruguay |
-| Venezuela | Vietnam | | |
+| Venezuela | Vietnam* | | |
 
 
 Glassdoor can only fetch 900 jobs from the endpoint we're using on a given search.
@@ -167,8 +158,4 @@ persist, [submit an issue](https://github.com/Bunsly/JobSpy/issues).
 - Waiting some time between scrapes (site-dependent).
 - Trying a VPN or proxy to change your IP address.
 
 ---
-
-
-
----
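The two rate-limit tips in the hunk above map directly onto JobSpy's parameters. A hedged usage sketch, assuming the top-level `scrape_jobs` helper forwards a `proxy` string to the scrapers (consistent with the `proxy: Optional[str]` constructor parameter visible in the scraper diffs below); the proxy address itself is hypothetical:

```python
import time

from jobspy import scrape_jobs

# Hypothetical proxy; anything above 900 results is capped by the Glassdoor
# scraper itself (min(900, results_wanted) in the scrape() diff below).
jobs = scrape_jobs(
    site_name=["glassdoor"],
    search_term="data engineer",
    results_wanted=100,
    proxy="http://user:pass@10.0.0.1:8080",  # hypothetical address
)

time.sleep(30)  # wait between scrapes to stay under site rate limits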
poetry.lock (generated, 40 changes)
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
 
 [[package]]
 name = "annotated-types"
@@ -524,17 +524,6 @@ files = [
     {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"},
 ]
 
-[[package]]
-name = "html2text"
-version = "2020.1.16"
-description = "Turn HTML into equivalent Markdown-structured text."
-optional = false
-python-versions = ">=3.5"
-files = [
-    {file = "html2text-2020.1.16-py3-none-any.whl", hash = "sha256:c7c629882da0cf377d66f073329ccf34a12ed2adf0169b9285ae4e63ef54c82b"},
-    {file = "html2text-2020.1.16.tar.gz", hash = "sha256:e296318e16b059ddb97f7a8a1d6a5c1d7af4544049a01e261731d2d5cc277bbb"},
-]
-
 [[package]]
 name = "idna"
 version = "3.4"
@@ -1037,6 +1026,21 @@ files = [
     {file = "jupyterlab_widgets-3.0.8.tar.gz", hash = "sha256:d428ab97b8d87cc7c54cbf37644d6e0f0e662f23876e05fa460a73ec3257252a"},
 ]
 
+[[package]]
+name = "markdownify"
+version = "0.11.6"
+description = "Convert HTML to markdown."
+optional = false
+python-versions = "*"
+files = [
+    {file = "markdownify-0.11.6-py3-none-any.whl", hash = "sha256:ba35fe289d5e9073bcd7d2cad629278fe25f1a93741fcdc0bfb4f009076d8324"},
+    {file = "markdownify-0.11.6.tar.gz", hash = "sha256:009b240e0c9f4c8eaf1d085625dcd4011e12f0f8cec55dedf9ea6f7655e49bfe"},
+]
+
+[package.dependencies]
+beautifulsoup4 = ">=4.9,<5"
+six = ">=1.15,<2"
+
 [[package]]
 name = "markupsafe"
 version = "2.1.3"
@@ -1064,16 +1068,6 @@ files = [
     {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
     {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
-    {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
     {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
@@ -2456,4 +2450,4 @@ files = [
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.10"
-content-hash = "eea3694820df164179cdd8312d382eb5b29d6317c4d34c586e8866c69aaee9e9"
+content-hash = "ba7f7cc9b6833a4a6271981f90610395639dd8b9b3db1370cbd1149d70cc9632"
pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.46"
+version = "1.1.47"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"
@@ -17,8 +17,8 @@ beautifulsoup4 = "^4.12.2"
 pandas = "^2.1.0"
 NUMPY = "1.24.2"
 pydantic = "^2.3.0"
-html2text = "^2020.1.16"
 tls-client = "^1.0.1"
+markdownify = "^0.11.6"
 
 
 [tool.poetry.group.dev.dependencies]
@@ -122,7 +122,7 @@ class Country(Enum):
     USA = ("usa,us,united states", "www", "com")
     URUGUAY = ("uruguay", "uy")
     VENEZUELA = ("venezuela", "ve")
-    VIETNAM = ("vietnam", "vn")
+    VIETNAM = ("vietnam", "vn", "com")
 
     # internal for ziprecruiter
     US_CANADA = ("usa/ca", "www")
@@ -145,7 +145,7 @@ class Country(Enum):
         else:
             raise Exception(f"Glassdoor is not available for {self.name}")
 
-    def get_url(self):
+    def get_glassdoor_url(self):
         return f"https://{self.glassdoor_domain_value}/"
 
     @classmethod
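Taken together, these two hunks explain the `Vietnam*` change in the README table: the third tuple element is the Glassdoor domain suffix, and the renamed helper builds the site URL from it. A hedged sketch of the effect (the `jobspy.jobs` import path is an assumption, not confirmed by the diff):

```python
from jobspy.jobs import Country  # import path assumed

# Before this commit VIETNAM had no Glassdoor domain, so Glassdoor searches
# for Vietnam raised "Glassdoor is not available for VIETNAM". With the
# added ("vietnam", "vn", "com") entry it resolves to a URL instead:
print(Country.VIETNAM.get_glassdoor_url())
```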
@@ -5,6 +5,8 @@ jobspy.scrapers.glassdoor
 This module contains routines to scrape Glassdoor.
 """
 import json
+import re
 
+import requests
 from typing import Optional
 from datetime import datetime, timedelta
@@ -42,6 +44,7 @@ class GlassdoorScraper(Scraper):
         self.session = None
         self.scraper_input = None
         self.jobs_per_page = 30
+        self.max_pages = 30
         self.seen_urls = set()
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
@@ -52,39 +55,40 @@ class GlassdoorScraper(Scraper):
         """
         self.scraper_input = scraper_input
         self.scraper_input.results_wanted = min(900, scraper_input.results_wanted)
-        self.base_url = self.scraper_input.country.get_url()
+        self.base_url = self.scraper_input.country.get_glassdoor_url()
 
+        self.session = create_session(self.proxy, is_tls=True, has_retry=True)
+        token = self._get_csrf_token()
+        self.headers['gd-csrf-token'] = token if token else self.fallback_token
+
         location_id, location_type = self._get_location(
             scraper_input.location, scraper_input.is_remote
         )
+        if location_type is None:
+            logger.error('Glassdoor: location not parsed')
+            return JobResponse(jobs=[])
         all_jobs: list[JobPost] = []
         cursor = None
-        max_pages = 30
-        self.session = create_session(self.proxy, is_tls=False, has_retry=True)
-        self.session.get(self.base_url)
 
-        try:
-            for page in range(
-                1 + (scraper_input.offset // self.jobs_per_page),
-                min(
-                    (scraper_input.results_wanted // self.jobs_per_page) + 2,
-                    max_pages + 1,
-                ),
-            ):
-                try:
-                    jobs, cursor = self._fetch_jobs_page(
-                        scraper_input, location_id, location_type, page, cursor
-                    )
-                    all_jobs.extend(jobs)
-                    if len(all_jobs) >= scraper_input.results_wanted:
-                        all_jobs = all_jobs[: scraper_input.results_wanted]
-                        break
-                except Exception as e:
-                    raise GlassdoorException(str(e))
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        for page in range(
+            1 + (scraper_input.offset // self.jobs_per_page),
+            min(
+                (scraper_input.results_wanted // self.jobs_per_page) + 2,
+                self.max_pages + 1,
+            ),
+        ):
+            logger.info(f'Glassdoor search page: {page}')
+            try:
+                jobs, cursor = self._fetch_jobs_page(
+                    scraper_input, location_id, location_type, page, cursor
+                )
+                all_jobs.extend(jobs)
+                if not jobs or len(all_jobs) >= scraper_input.results_wanted:
+                    all_jobs = all_jobs[: scraper_input.results_wanted]
+                    break
            except Exception as e:
+                logger.error(f'Glassdoor: {str(e)}')
+                break
         return JobResponse(jobs=all_jobs)
 
     def _fetch_jobs_page(
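A quick worked example of the paging bounds in the loop above, with hypothetical values: no offset, 70 results wanted, 30 jobs per page. The loop visits pages 1 through 3, enough to cover the request:

```python
offset, results_wanted, jobs_per_page, max_pages = 0, 70, 30, 30

start = 1 + offset // jobs_per_page                             # 1
stop = min(results_wanted // jobs_per_page + 2, max_pages + 1)  # min(4, 31) = 4
print(list(range(start, stop)))  # [1, 2, 3] -> up to 90 jobs fetched
```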
@@ -98,27 +102,26 @@ class GlassdoorScraper(Scraper):
         """
         Scrapes a page of Glassdoor for jobs with scraper_input criteria
         """
+        jobs = []
         self.scraper_input = scraper_input
         try:
             payload = self._add_payload(
                 location_id, location_type, page_num, cursor
             )
             response = self.session.post(
-                f"{self.base_url}/graph", headers=self.headers, timeout=10, data=payload
+                f"{self.base_url}/graph", headers=self.headers, timeout_seconds=15, data=payload
             )
             if response.status_code != 200:
-                raise GlassdoorException(
-                    f"bad response status code: {response.status_code}"
-                )
+                raise GlassdoorException(f"bad response status code: {response.status_code}")
             res_json = response.json()[0]
             if "errors" in res_json:
                 raise ValueError("Error encountered in API response")
-        except Exception as e:
-            raise GlassdoorException(str(e))
+        except (requests.exceptions.ReadTimeout, GlassdoorException, ValueError, Exception) as e:
+            logger.error(f'Glassdoor: {str(e)}')
+            return jobs, None
 
         jobs_data = res_json["data"]["jobListings"]["jobListings"]
 
-        jobs = []
         with ThreadPoolExecutor(max_workers=self.jobs_per_page) as executor:
             future_to_job_data = {executor.submit(self._process_job, job): job for job in jobs_data}
             for future in as_completed(future_to_job_data):
@@ -133,6 +136,18 @@ class GlassdoorScraper(Scraper):
                 res_json["data"]["jobListings"]["paginationCursors"], page_num + 1
             )
 
+    def _get_csrf_token(self):
+        """
+        Fetches csrf token needed for API by visiting a generic page
+        """
+        res = self.session.get(f'{self.base_url}/Job/computer-science-jobs.htm', headers=self.headers)
+        pattern = r'"token":\s*"([^"]+)"'
+        matches = re.findall(pattern, res.text)
+        token = None
+        if matches:
+            token = matches[0]
+        return token
+
     def _process_job(self, job_data):
         """
         Processes a single job and fetches its description.
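The token scrape is a plain regex over the page HTML; when nothing matches, `scrape` falls back to the hardcoded `fallback_token` added below. A self-contained illustration of the pattern against a made-up page fragment:

```python
import re

# Hypothetical fragment of the kind of embedded JSON the regex targets.
html = 'window.appData = {"token": "abc123:def456:ghi789", "locale": "en-US"}'

pattern = r'"token":\s*"([^"]+)"'
matches = re.findall(pattern, html)
token = matches[0] if matches else None
print(token)  # abc123:def456:ghi789
```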
@@ -217,7 +232,7 @@ class GlassdoorScraper(Scraper):
             return "11047", "STATE"  # remote options
         url = f"{self.base_url}/findPopularLocationAjax.htm?maxLocationsToReturn=10&term={location}"
-        session = create_session(self.proxy, has_retry=True)
-        res = session.get(url)
+        res = self.session.get(url, headers=self.headers)
         if res.status_code != 200:
             if res.status_code == 429:
                 logger.error(f'429 Response - Blocked by Glassdoor for too many requests')
@@ -266,7 +281,74 @@ class GlassdoorScraper(Scraper):
                 "fromage": fromage,
                 "sort": "date"
             },
-            "query": """
+            "query": self.query_template
+        }
+        if self.scraper_input.job_type:
+            payload["variables"]["filterParams"].append(
+                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
+            )
+        return json.dumps([payload])
+
+    @staticmethod
+    def parse_compensation(data: dict) -> Optional[Compensation]:
+        pay_period = data.get("payPeriod")
+        adjusted_pay = data.get("payPeriodAdjustedPay")
+        currency = data.get("payCurrency", "USD")
+        if not pay_period or not adjusted_pay:
+            return None
+
+        interval = None
+        if pay_period == "ANNUAL":
+            interval = CompensationInterval.YEARLY
+        elif pay_period:
+            interval = CompensationInterval.get_interval(pay_period)
+        min_amount = int(adjusted_pay.get("p10") // 1)
+        max_amount = int(adjusted_pay.get("p90") // 1)
+        return Compensation(
+            interval=interval,
+            min_amount=min_amount,
+            max_amount=max_amount,
+            currency=currency,
+        )
+
+    @staticmethod
+    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
+        for job_type in JobType:
+            if job_type_str in job_type.value:
+                return [job_type]
+
+    @staticmethod
+    def parse_location(location_name: str) -> Location | None:
+        if not location_name or location_name == "Remote":
+            return
+        city, _, state = location_name.partition(", ")
+        return Location(city=city, state=state)
+
+    @staticmethod
+    def get_cursor_for_page(pagination_cursors, page_num):
+        for cursor_data in pagination_cursors:
+            if cursor_data["pageNumber"] == page_num:
+                return cursor_data["cursor"]
+
+    fallback_token = "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok"
+    headers = {
+        "authority": "www.glassdoor.com",
+        "accept": "*/*",
+        "accept-language": "en-US,en;q=0.9",
+        "apollographql-client-name": "job-search-next",
+        "apollographql-client-version": "4.65.5",
+        "content-type": "application/json",
+        "origin": "https://www.glassdoor.com",
+        "referer": "https://www.glassdoor.com/",
+        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
+        "sec-ch-ua-mobile": "?0",
+        "sec-ch-ua-platform": '"macOS"',
+        "sec-fetch-dest": "empty",
+        "sec-fetch-mode": "cors",
+        "sec-fetch-site": "same-origin",
+        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
+    }
+    query_template = """
+    query JobSearchResultsQuery(
+        $excludeJobListingIds: [Long!],
+        $keyword: String,
@@ -431,70 +513,4 @@ class GlassdoorScraper(Scraper):
             }
             __typename
         }
-        """
-        }
-        if self.scraper_input.job_type:
-            payload["variables"]["filterParams"].append(
-                {"filterKey": "jobType", "values": self.scraper_input.job_type.value[0]}
-            )
-        return json.dumps([payload])
-
-    @staticmethod
-    def parse_compensation(data: dict) -> Optional[Compensation]:
-        pay_period = data.get("payPeriod")
-        adjusted_pay = data.get("payPeriodAdjustedPay")
-        currency = data.get("payCurrency", "USD")
-        if not pay_period or not adjusted_pay:
-            return None
-
-        interval = None
-        if pay_period == "ANNUAL":
-            interval = CompensationInterval.YEARLY
-        elif pay_period:
-            interval = CompensationInterval.get_interval(pay_period)
-        min_amount = int(adjusted_pay.get("p10") // 1)
-        max_amount = int(adjusted_pay.get("p90") // 1)
-        return Compensation(
-            interval=interval,
-            min_amount=min_amount,
-            max_amount=max_amount,
-            currency=currency,
-        )
-
-    @staticmethod
-    def get_job_type_enum(job_type_str: str) -> list[JobType] | None:
-        for job_type in JobType:
-            if job_type_str in job_type.value:
-                return [job_type]
-
-    @staticmethod
-    def parse_location(location_name: str) -> Location | None:
-        if not location_name or location_name == "Remote":
-            return
-        city, _, state = location_name.partition(", ")
-        return Location(city=city, state=state)
-
-    @staticmethod
-    def get_cursor_for_page(pagination_cursors, page_num):
-        for cursor_data in pagination_cursors:
-            if cursor_data["pageNumber"] == page_num:
-                return cursor_data["cursor"]
-
-    headers = {
-        "authority": "www.glassdoor.com",
-        "accept": "*/*",
-        "accept-language": "en-US,en;q=0.9",
-        "apollographql-client-name": "job-search-next",
-        "apollographql-client-version": "4.65.5",
-        "content-type": "application/json",
-        "gd-csrf-token": "Ft6oHEWlRZrxDww95Cpazw:0pGUrkb2y3TyOpAIqF2vbPmUXoXVkD3oEGDVkvfeCerceQ5-n8mBg3BovySUIjmCPHCaW0H2nQVdqzbtsYqf4Q:wcqRqeegRUa9MVLJGyujVXB7vWFPjdaS1CtrrzJq-ok",
-        "origin": "https://www.glassdoor.com",
-        "referer": "https://www.glassdoor.com/",
-        "sec-ch-ua": '"Chromium";v="118", "Google Chrome";v="118", "Not=A?Brand";v="99"',
-        "sec-ch-ua-mobile": "?0",
-        "sec-ch-ua-platform": '"macOS"',
-        "sec-fetch-dest": "empty",
-        "sec-fetch-mode": "cors",
-        "sec-fetch-site": "same-origin",
-        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
-    }
+    """
@@ -15,7 +15,6 @@ from bs4 import BeautifulSoup
 from bs4.element import Tag
 from concurrent.futures import ThreadPoolExecutor, Future
 
-from ..exceptions import IndeedException
 from ..utils import (
     count_urgent_words,
     extract_emails_from_text,
@@ -63,8 +62,7 @@ class IndeedScraper(Scraper):
         while len(self.seen_urls) < scraper_input.results_wanted:
             pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
             new_jobs = False
-
-            with ThreadPoolExecutor(max_workers=10) as executor:
+            with ThreadPoolExecutor(max_workers=self.num_workers) as executor:
                 futures: list[Future] = [
                     executor.submit(self._scrape_page, page + pages_processed)
                     for page in range(pages_to_process)
@@ -93,10 +91,11 @@ class IndeedScraper(Scraper):
         :param page:
         :return: jobs found on page, total number of jobs found for search
         """
+        logger.info(f'Indeed search page: {page + 1}')
         job_list = []
         domain = self.scraper_input.country.indeed_domain_value
         self.base_url = f"https://{domain}.indeed.com"
 
         try:
             session = create_session(self.proxy)
             response = session.get(
@@ -141,7 +140,6 @@ class IndeedScraper(Scraper):
         job_results: list[Future] = [
             executor.submit(self._process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
         ]
-
         job_list = [result.result() for result in job_results if result.result()]
 
         return job_list
@@ -9,8 +9,6 @@ import random
 from typing import Optional
 from datetime import datetime
 
-import requests
 from requests.exceptions import ProxyError
-from threading import Lock
 from bs4.element import Tag
 from bs4 import BeautifulSoup
@@ -41,15 +39,16 @@ from ..utils import (
 class LinkedInScraper(Scraper):
     base_url = "https://www.linkedin.com"
     delay = 3
+    band_delay = 4
+    jobs_per_page = 25
 
     def __init__(self, proxy: Optional[str] = None):
         """
         Initializes LinkedInScraper with the LinkedIn job search url
         """
-        site = Site(Site.LINKEDIN)
+        super().__init__(Site(Site.LINKEDIN), proxy=proxy)
+        self.scraper_input = None
         self.country = "worldwide"
-
-        super().__init__(site, proxy=proxy)
 
     def scrape(self, scraper_input: ScraperInput) -> JobResponse:
         """
@@ -68,8 +67,8 @@ class LinkedInScraper(Scraper):
             else None
         )
         continue_search = lambda: len(job_list) < scraper_input.results_wanted and page < 1000
 
         while continue_search():
-
+            logger.info(f'LinkedIn search page: {page // 25 + 1}')
             session = create_session(is_tls=False, has_retry=True, delay=5)
             params = {
                 "keywords": scraper_input.search_term,
@@ -83,8 +82,9 @@ class LinkedInScraper(Scraper):
                 "start": page + scraper_input.offset,
                 "f_AL": "true" if scraper_input.easy_apply else None,
                 "f_C": ','.join(map(str, scraper_input.linkedin_company_ids)) if scraper_input.linkedin_company_ids else None,
-                "f_TPR": f"r{seconds_old}",
             }
+            if seconds_old is not None:
+                params["f_TPR"] = f"r{seconds_old}"
 
             params = {k: v for k, v in params.items() if v is not None}
             try:
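This guard fixes a subtle bug: with `seconds_old = None`, the old dict entry formatted to the literal string `"rNone"`, which the `if v is not None` comprehension below it could not strip. A tiny repro of the old behavior:

```python
seconds_old = None

# Old behavior: the f-string turns None into the text "rNone".
params = {"f_TPR": f"r{seconds_old}"}
params = {k: v for k, v in params.items() if v is not None}
print(params)  # {'f_TPR': 'rNone'} -- the bogus filter survived the None check
```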
@@ -101,13 +101,13 @@ class LinkedInScraper(Scraper):
                     logger.error(f'429 Response - Blocked by LinkedIn for too many requests')
                 else:
                     logger.error(f'LinkedIn response status code {response.status_code}')
-                return JobResponse(job_list=job_list)
+                return JobResponse(jobs=job_list)
             except Exception as e:
                 if "Proxy responded with" in str(e):
                     logger.error(f'LinkedIn: Bad proxy')
                 else:
                     logger.error(f'LinkedIn: {str(e)}')
-                return JobResponse(job_list=job_list)
+                return JobResponse(jobs=job_list)
 
             soup = BeautifulSoup(response.text, "html.parser")
             job_cards = soup.find_all("div", class_="base-search-card")
@@ -136,8 +136,8 @@ class LinkedInScraper(Scraper):
                 raise LinkedInException(str(e))
 
             if continue_search():
-                time.sleep(random.uniform(self.delay, self.delay + 2))
-                page += 25
+                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
+                page += self.jobs_per_page
 
         job_list = job_list[: scraper_input.results_wanted]
         return JobResponse(jobs=job_list)
@@ -1,21 +1,19 @@
-import re
 import logging
+import re
 
-import numpy as np
-import html2text
-import tls_client
+import numpy as np
 import requests
+import tls_client
+from markdownify import markdownify as md
 from requests.adapters import HTTPAdapter, Retry
 
 from ..jobs import JobType
 
-text_maker = html2text.HTML2Text()
 logger = logging.getLogger("JobSpy")
 logger.propagate = False
 if not logger.handlers:
-    logger.setLevel(logging.ERROR)
+    logger.setLevel(logging.INFO)
     console_handler = logging.StreamHandler()
     console_handler.setLevel(logging.ERROR)
     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
     console_handler.setFormatter(formatter)
     logger.addHandler(console_handler)
@@ -37,13 +35,9 @@ def count_urgent_words(description: str) -> int:
 
 def markdown_converter(description_html: str):
     if description_html is None:
-        return ""
-    text_maker.ignore_links = False
-    try:
-        markdown = text_maker.handle(description_html)
-        return markdown.strip()
-    except AssertionError as e:
-        return ""
+        return None
+    markdown = md(description_html)
+    return markdown.strip()
 
 
 def extract_emails_from_text(text: str) -> list[str] | None:
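The html2text-to-markdownify swap removes the module-level `text_maker` state and the `AssertionError` workaround. A quick illustration of the replacement call (output reflects markdownify 0.11.6 defaults, which preserve links; the HTML sample is made up):

```python
from markdownify import markdownify as md

html = "<p>Apply <a href='https://example.com/job/1'>here</a> today</p>"
print(md(html).strip())  # Apply [here](https://example.com/job/1) today
```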
@@ -63,7 +63,7 @@ class ZipRecruiterScraper(Scraper):
                 break
             if page > 1:
                 time.sleep(self.delay)
-
+            logger.info(f'ZipRecruiter search page: {page}')
             jobs_on_page, continue_token = self._find_jobs_in_page(
                 scraper_input, continue_token
             )