fix: indeed parse description bug (#96)

* fix(indeed): fetch the full job description

* chore: bump version to 1.1.39
pull/97/head · v1.1.39
Cullen Watson · 2024-02-02 18:21:55 -06:00 · committed by GitHub
parent 1ccf2290fe
commit ce3bd84ee5
2 changed files with 16 additions and 7 deletions

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.38"
+version = "1.1.39"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton <zachary@bunsly.com>", "Cullen Watson <cullen@bunsly.com>"]
 homepage = "https://github.com/Bunsly/JobSpy"

Indeed scraper (IndeedScraper)

@@ -154,8 +154,9 @@ class IndeedScraper(Scraper):
             )
             return job_post

+        workers = 10 if scraper_input.full_description else 10  # possibly lessen 10 when fetching desc based on feedback
         jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
-        with ThreadPoolExecutor(max_workers=1) as executor:
+        with ThreadPoolExecutor(max_workers=workers) as executor:
             job_results: list[Future] = [
                 executor.submit(process_job, job) for job in jobs
             ]
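
The concurrency change above replaces the hard-coded max_workers=1 with a workers variable, so per-job processing (which can include fetching the full description when scraper_input.full_description is set) runs concurrently instead of one job at a time. Note that the conditional currently resolves to 10 on both branches; the inline comment flags that the full-description path may get a lower count later. A minimal standalone sketch of the same ThreadPoolExecutor pattern, with a hypothetical fetch_description stub standing in for the scraper's process_job:

from concurrent.futures import Future, ThreadPoolExecutor

def fetch_description(job_id: str) -> str:
    # Hypothetical stand-in for the scraper's process_job; the real code
    # issues an HTTP request per job and builds a JobPost.
    return f"description for {job_id}"

job_ids = ["abc123", "def456", "ghi789"]  # hypothetical Indeed job keys
workers = 10                              # same count the commit uses

with ThreadPoolExecutor(max_workers=workers) as executor:
    futures: list[Future] = [
        executor.submit(fetch_description, jid) for jid in job_ids
    ]
    results = [future.result() for future in futures]

print(results)
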
@@ -206,7 +207,7 @@ class IndeedScraper(Scraper):
         parsed_url = urllib.parse.urlparse(job_page_url)
         params = urllib.parse.parse_qs(parsed_url.query)
         jk_value = params.get("jk", [None])[0]
-        formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1"
+        formatted_url = f"{self.url}/m/viewjob?jk={jk_value}&spa=1"
         session = create_session(self.proxy)

         try:
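
The hunk above only changes where the description is requested from: the jk job key is still pulled out of the job page URL's query string, but the request now goes to the mobile /m/viewjob endpoint instead of /viewjob. A small sketch of the same jk extraction and URL rebuild, assuming a hypothetical job page URL and base URL (the scraper uses self.url here):

import urllib.parse

base_url = "https://www.indeed.com"  # assumed base URL; self.url in the scraper
job_page_url = "https://www.indeed.com/viewjob?jk=abc123def456&from=serp"  # hypothetical

parsed_url = urllib.parse.urlparse(job_page_url)
params = urllib.parse.parse_qs(parsed_url.query)
jk_value = params.get("jk", [None])[0]  # Indeed's job key, or None if missing

# spa=1 is kept from the URL format the scraper already used
formatted_url = f"{base_url}/m/viewjob?jk={jk_value}&spa=1"
print(formatted_url)  # https://www.indeed.com/m/viewjob?jk=abc123def456&spa=1
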
@@ -223,10 +224,18 @@
             return None

         try:
-            data = json.loads(response.text)
-            job_description = data["body"]["jobInfoWrapperModel"]["jobInfoModel"][
-                "sanitizedJobDescription"
-            ]
+            soup = BeautifulSoup(response.text, 'html.parser')
+            script_tags = soup.find_all('script')
+
+            job_description = ''
+            for tag in script_tags:
+                if 'window._initialData' in tag.text:
+                    json_str = tag.text
+                    json_str = json_str.split('window._initialData=')[1]
+                    json_str = json_str.rsplit(';', 1)[0]
+                    data = json.loads(json_str)
+                    job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
+                    break
         except (KeyError, TypeError, IndexError):
             return None
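
This parsing change is the actual fix: the old code expected the viewjob?spa=1 response body to be bare JSON, while the new code requests the mobile page, parses it as HTML with BeautifulSoup, locates the script tag that assigns window._initialData, strips the assignment prefix and trailing semicolon, and reads sanitizedJobDescription out of the embedded JSON. A self-contained sketch of the same extraction against a hypothetical, heavily simplified page:

import json

from bs4 import BeautifulSoup

# Hypothetical, minimal stand-in for the /m/viewjob?spa=1 response; the real
# page embeds a much larger window._initialData object.
html = (
    '<html><body><script>'
    'window._initialData={"jobInfoWrapperModel": {"jobInfoModel": '
    '{"sanitizedJobDescription": "Build and maintain web scrapers."}}};'
    '</script></body></html>'
)

soup = BeautifulSoup(html, "html.parser")
job_description = ""
for tag in soup.find_all("script"):
    if "window._initialData" in tag.text:
        json_str = tag.text.split("window._initialData=")[1]
        json_str = json_str.rsplit(";", 1)[0]  # drop the trailing semicolon
        data = json.loads(json_str)
        job_description = data["jobInfoWrapperModel"]["jobInfoModel"]["sanitizedJobDescription"]
        break

print(job_description)  # Build and maintain web scrapers.

As in the diff, only the first script tag containing window._initialData is used; in the scraper itself the lookup stays inside the existing try/except, so a missing key still returns None.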