From 2563c5ca0826b0bc8e333a8203ba02d5855ec5b5 Mon Sep 17 00:00:00 2001
From: Cullen Watson
Date: Fri, 9 Feb 2024 12:05:10 -0600
Subject: [PATCH] enh: Indeed company url (#104)

---
 README.md                              |   3 +-
 pyproject.toml                         |   2 +-
 src/jobspy/__init__.py                 |   5 +-
 src/jobspy/jobs/__init__.py            |  13 +-
 src/jobspy/scrapers/indeed/__init__.py | 249 +++++++++++++++++++------
 5 files changed, 203 insertions(+), 69 deletions(-)

diff --git a/README.md b/README.md
index f311ca6..461e740 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,6 @@ jobs = scrape_jobs(
     location="Dallas, TX",
     results_wanted=10,
     country_indeed='USA'  # only needed for indeed / glassdoor
-    # full_description=True (get full description for LinkedIn/Indeed; slower)
 )
 print(f"Found {len(jobs)} jobs")
 print(jobs.head())
@@ -68,7 +67,7 @@ Optional
 ├── job_type (enum): fulltime, parttime, internship, contract
 ├── proxy (str): in format 'http://user:pass@host:port' or [https, socks]
 ├── is_remote (bool)
-├── full_description (bool): fetches full description for Indeed / LinkedIn (much slower)
+├── full_description (bool): fetches full description for LinkedIn (slower)
 ├── results_wanted (int): number of job results to retrieve for each site specified in 'site_type'
 ├── easy_apply (bool): filters for jobs that are hosted on the job board site
 ├── linkedin_company_ids (list[int]): searches for linkedin jobs with specific company ids
diff --git a/pyproject.toml b/pyproject.toml
index 8e8461d..eea69ad 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "python-jobspy"
-version = "1.1.42"
+version = "1.1.43"
 description = "Job scraper for LinkedIn, Indeed, Glassdoor & ZipRecruiter"
 authors = ["Zachary Hampton ", "Cullen Watson "]
 homepage = "https://github.com/Bunsly/JobSpy"
diff --git a/src/jobspy/__init__.py b/src/jobspy/__init__.py
index cf0222b..5d96b11 100644
--- a/src/jobspy/__init__.py
+++ b/src/jobspy/__init__.py
@@ -1,7 +1,6 @@
 import pandas as pd
 from typing import Tuple
-import concurrent.futures
-from concurrent.futures import ThreadPoolExecutor
+from concurrent.futures import ThreadPoolExecutor, as_completed

 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
@@ -119,7 +118,7 @@ def scrape_jobs(
             executor.submit(worker, site): site for site in scraper_input.site_type
         }

-        for future in concurrent.futures.as_completed(future_to_site):
+        for future in as_completed(future_to_site):
             site_value, scraped_data = future.result()
             site_to_jobs_dict[site_value] = scraped_data

diff --git a/src/jobspy/jobs/__init__.py b/src/jobspy/jobs/__init__.py
index db600f1..a819897 100644
--- a/src/jobspy/jobs/__init__.py
+++ b/src/jobspy/jobs/__init__.py
@@ -193,13 +193,20 @@ class CompensationInterval(Enum):

     @classmethod
     def get_interval(cls, pay_period):
-        return cls[pay_period].value if pay_period in cls.__members__ else None
+        interval_mapping = {
+            "YEAR": cls.YEARLY,
+            "HOUR": cls.HOURLY,
+        }
+        if pay_period in interval_mapping:
+            return interval_mapping[pay_period].value
+        else:
+            return cls[pay_period].value if pay_period in cls.__members__ else None


 class Compensation(BaseModel):
     interval: Optional[CompensationInterval] = None
-    min_amount: int | None = None
-    max_amount: int | None = None
+    min_amount: float | None = None
+    max_amount: float | None = None
     currency: Optional[str] = "USD"


diff --git a/src/jobspy/scrapers/indeed/__init__.py b/src/jobspy/scrapers/indeed/__init__.py
index 9e21e70..695719a 100644
--- a/src/jobspy/scrapers/indeed/__init__.py
+++ b/src/jobspy/scrapers/indeed/__init__.py
@@ -6,8 +6,8 @@ This module contains routines to scrape Indeed.
 """
 import re
 import math
-import io
 import json
+import requests

 from typing import Any
 from datetime import datetime
@@ -80,13 +80,14 @@ class IndeedScraper(Scraper):
             raise IndeedException(str(e))

         soup = BeautifulSoup(response.content, "html.parser")
+        job_list = []
+        total_num_jobs = IndeedScraper.total_jobs(soup)
         if "did not match any jobs" in response.text:
-            raise IndeedException("Parsing exception: Search did not match any jobs")
+            return job_list, total_num_jobs

         jobs = IndeedScraper.parse_jobs(
             soup
         )  #: can raise exception, handled by main scrape function
-        total_num_jobs = IndeedScraper.total_jobs(soup)

         if (
             not jobs.get("metaData", {})
@@ -95,70 +96,51 @@ class IndeedScraper(Scraper):
         ):
             raise IndeedException("No jobs found.")

-        def process_job(job: dict) -> JobPost | None:
+        def process_job(job: dict, job_detailed: dict) -> JobPost | None:
             job_url = f'{self.url}/m/jobs/viewjob?jk={job["jobkey"]}'
             job_url_client = f'{self.url}/viewjob?jk={job["jobkey"]}'
             if job_url in self.seen_urls:
                 return None
+            self.seen_urls.add(job_url)
+            description = job_detailed['description']['html']

-            extracted_salary = job.get("extractedSalary")
-            compensation = None
-            if extracted_salary:
-                salary_snippet = job.get("salarySnippet")
-                currency = salary_snippet.get("currency") if salary_snippet else None
-                interval = (extracted_salary.get("type"),)
-                if isinstance(interval, tuple):
-                    interval = interval[0]
-
-                interval = interval.upper()
-                if interval in CompensationInterval.__members__:
-                    compensation = Compensation(
-                        interval=CompensationInterval[interval],
-                        min_amount=int(extracted_salary.get("min")),
-                        max_amount=int(extracted_salary.get("max")),
-                        currency=currency,
-                    )

             job_type = IndeedScraper.get_job_type(job)
             timestamp_seconds = job["pubDate"] / 1000
             date_posted = datetime.fromtimestamp(timestamp_seconds)
             date_posted = date_posted.strftime("%Y-%m-%d")

-            description = self.get_description(job_url) if scraper_input.full_description else None
-
-            with io.StringIO(job["snippet"]) as f:
-                soup_io = BeautifulSoup(f, "html.parser")
-                li_elements = soup_io.find_all("li")
-                if description is None and li_elements:
-                    description = " ".join(li.text for li in li_elements)
-
             job_post = JobPost(
                 title=job["normTitle"],
                 description=description,
                 company_name=job["company"],
-                company_url=self.url + job["companyOverviewLink"] if "companyOverviewLink" in job else None,
+                company_url=f"{self.url}{job_detailed['employer']['relativeCompanyPageUrl']}" if job_detailed['employer'] else None,
                 location=Location(
                     city=job.get("jobLocationCity"),
                     state=job.get("jobLocationState"),
                     country=self.country,
                 ),
                 job_type=job_type,
-                compensation=compensation,
+                compensation=self.get_compensation(job, job_detailed),
                 date_posted=date_posted,
                 job_url=job_url_client,
                 emails=extract_emails_from_text(description) if description else None,
                 num_urgent_words=count_urgent_words(description) if description else None,
-                is_remote=self.is_remote_job(job),
+                is_remote=IndeedScraper.is_job_remote(job, job_detailed, description)
+
             )
             return job_post

-        workers = 10 if scraper_input.full_description else 10 # possibly lessen 10 when fetching desc based on feedback
+        workers = 10
         jobs = jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]
+        job_keys = [job['jobkey'] for job in jobs]
+        jobs_detailed = self.get_job_details(job_keys)
+
         with ThreadPoolExecutor(max_workers=workers) as executor:
             job_results: list[Future] = [
-                executor.submit(process_job, job) for job in jobs
+                executor.submit(process_job, job, job_detailed['job']) for job, job_detailed in zip(jobs, jobs_detailed)
             ]
         job_list = [result.result() for result in job_results if result.result()]
@@ -171,26 +153,34 @@ class IndeedScraper(Scraper):
         :param scraper_input:
         :return: job_response
         """
-        pages_to_process = (
-            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
-        )
-
-        #: get first page to initialize session
         job_list, total_results = self.scrape_page(scraper_input, 0)
+        pages_processed = 1

-        with ThreadPoolExecutor(max_workers=10) as executor:
-            futures: list[Future] = [
-                executor.submit(self.scrape_page, scraper_input, page)
-                for page in range(1, pages_to_process + 1)
-            ]
+        while len(self.seen_urls) < scraper_input.results_wanted:
+            pages_to_process = math.ceil((scraper_input.results_wanted - len(self.seen_urls)) / self.jobs_per_page)
+            new_jobs = False

-            for future in futures:
-                jobs, _ = future.result()
+            with ThreadPoolExecutor(max_workers=10) as executor:
+                futures: list[Future] = [
+                    executor.submit(self.scrape_page, scraper_input, page + pages_processed)
+                    for page in range(pages_to_process)
+                ]

-                job_list += jobs
+                for future in futures:
+                    jobs, _ = future.result()
+                    if jobs:
+                        job_list += jobs
+                        new_jobs = True
+                    if len(self.seen_urls) >= scraper_input.results_wanted:
+                        break

-        if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[: scraper_input.results_wanted]
+            pages_processed += pages_to_process
+            if not new_jobs:
+                break
+
+
+        if len(self.seen_urls) > scraper_input.results_wanted:
+            job_list = job_list[:scraper_input.results_wanted]

         job_response = JobResponse(
             jobs=job_list,
@@ -261,6 +251,44 @@ class IndeedScraper(Scraper):
                 job_types.append(job_type)
         return job_types

+    @staticmethod
+    def get_compensation(job: dict, job_detailed: dict) -> Compensation:
+        """
+        Parses the job to get compensation details.
+        :param job:
+        :param job_detailed:
+        :return: compensation object
+        """
+        comp = job_detailed['compensation']['baseSalary']
+        if comp:
+            interval = IndeedScraper.get_correct_interval(comp['unitOfWork'])
+            if interval:
+                return Compensation(
+                    interval=interval,
+                    min_amount=round(comp['range'].get('min'), 2) if comp['range'].get('min') is not None else None,
+                    max_amount=round(comp['range'].get('max'), 2) if comp['range'].get('max') is not None else None,
+                    currency=job_detailed['compensation']['currencyCode']
+                )
+
+        extracted_salary = job.get("extractedSalary")
+        compensation = None
+        if extracted_salary:
+            salary_snippet = job.get("salarySnippet")
+            currency = salary_snippet.get("currency") if salary_snippet else None
+            interval = (extracted_salary.get("type"),)
+            if isinstance(interval, tuple):
+                interval = interval[0]
+
+            interval = interval.upper()
+            if interval in CompensationInterval.__members__:
+                compensation = Compensation(
+                    interval=CompensationInterval[interval],
+                    min_amount=int(extracted_salary.get("min")),
+                    max_amount=int(extracted_salary.get("max")),
+                    currency=currency,
+                )
+        return compensation
+
     @staticmethod
     def parse_jobs(soup: BeautifulSoup) -> dict:
         """
@@ -333,17 +361,6 @@ class IndeedScraper(Scraper):
             'referer': 'https://www.indeed.com/m/jobs?q=software%20intern&l=Dallas%2C%20TX&from=serpso&rq=1&rsIdx=3',
         }

-    @staticmethod
-    def is_remote_job(job: dict) -> bool:
-        """
-        :param job:
-        :return: bool
-        """
-        for taxonomy in job.get("taxonomyAttributes", []):
-            if taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0:
-                return True
-        return False
-
     @staticmethod
     def add_params(scraper_input: ScraperInput, page: int) -> dict[str, str | Any]:
         params = {
@@ -369,3 +386,115 @@ class IndeedScraper(Scraper):
             params['iafilter'] = 1

         return params
+
+    @staticmethod
+    def is_job_remote(job: dict, job_detailed: dict, description: str) -> bool:
+        remote_keywords = ['remote', 'work from home', 'wfh']
+        is_remote_in_attributes = any(
+            any(keyword in attr['label'].lower() for keyword in remote_keywords)
+            for attr in job_detailed['attributes']
+        )
+        is_remote_in_description = any(keyword in description.lower() for keyword in remote_keywords)
+        is_remote_in_location = any(
+            keyword in job_detailed['location']['formatted']['long'].lower()
+            for keyword in remote_keywords
+        )
+        is_remote_in_taxonomy = any(
+            taxonomy["label"] == "remote" and len(taxonomy["attributes"]) > 0
+            for taxonomy in job.get("taxonomyAttributes", [])
+        )
+        return is_remote_in_attributes or is_remote_in_description or is_remote_in_location or is_remote_in_taxonomy
+
+    @staticmethod
+    def get_job_details(job_keys: list[str]) -> list:
+        """
+        Queries the GraphQL endpoint for detailed job information for the given job keys.
+        """
+        url = "https://apis.indeed.com/graphql"
+        headers = {
+            'Host': 'apis.indeed.com',
+            'content-type': 'application/json',
+            'indeed-api-key': '161092c2017b5bbab13edb12461a62d5a833871e7cad6d9d475304573de67ac8',
+            'accept': 'application/json',
+            'indeed-locale': 'en-US',
+            'accept-language': 'en-US,en;q=0.9',
+            'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 16_6_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 Indeed App 193.1',
+            'indeed-app-info': 'appv=193.1; appid=com.indeed.jobsearch; osv=16.6.1; os=ios; dtype=phone',
+            'indeed-co': 'US',
+        }
+
+        job_keys_gql = '[' + ', '.join(f'"{key}"' for key in job_keys) + ']'
+
+        payload = {
+            "query": f"""
+            query GetJobData {{
+                jobData(input: {{
+                    jobKeys: {job_keys_gql}
+                }}) {{
+                    results {{
+                        job {{
+                            key
+                            title
+                            description {{
+                                html
+                            }}
+                            location {{
+                                countryName
+                                countryCode
+                                city
+                                postalCode
+                                streetAddress
+                                formatted {{
+                                    short
+                                    long
+                                }}
+                            }}
+                            compensation {{
+                                baseSalary {{
+                                    unitOfWork
+                                    range {{
+                                        ... on Range {{
+                                            min
+                                            max
+                                        }}
+                                    }}
+                                }}
+                                currencyCode
+                            }}
+                            attributes {{
+                                label
+                            }}
+                            employer {{
+                                relativeCompanyPageUrl
+                            }}
+                            recruit {{
+                                viewJobUrl
+                                detailedSalary
+                                workSchedule
+                            }}
+                        }}
+                    }}
+                }}
+            }}
+            """
+        }
+        response = requests.post(url, headers=headers, json=payload)
+        if response.status_code == 200:
+            return response.json()['data']['jobData']['results']
+        else:
+            return []
+
+    @staticmethod
+    def get_correct_interval(interval: str) -> CompensationInterval:
+        interval_mapping = {
+            "DAY": "DAILY",
+            "YEAR": "YEARLY",
+            "HOUR": "HOURLY",
+            "WEEK": "WEEKLY",
+            "MONTH": "MONTHLY"
+        }
+        mapped_interval = interval_mapping.get(interval.upper(), None)
+        if mapped_interval and mapped_interval in CompensationInterval.__members__:
+            return CompensationInterval[mapped_interval]
+        else:
+            raise ValueError(f"Unsupported interval: {interval}")
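
The reworked scrape() loop deduplicates on seen_urls and keeps requesting pages until results_wanted unique postings are collected or a full pass yields nothing new. Stripped of the scraper specifics, the control flow looks roughly like this (a sketch only; fetch_page stands in for scrape_page, and job.url for the job-URL dedup key):

```python
import math
from typing import Callable

def collect(fetch_page: Callable[[int], list], wanted: int, per_page: int) -> list:
    """Illustrative skeleton of the request-until-enough loop in scrape()."""
    seen: set[str] = set()  # mirrors self.seen_urls
    results: list = []
    page = 0
    while len(seen) < wanted:
        # estimate how many more pages should be needed, as the patch does
        pages_needed = math.ceil((wanted - len(seen)) / per_page)
        got_new = False
        for _ in range(pages_needed):
            for job in fetch_page(page):
                if job.url not in seen:
                    seen.add(job.url)
                    results.append(job)
                    got_new = True
            page += 1
        if not got_new:  # results exhausted: stop instead of looping forever
            break
    return results[:wanted]
```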
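
Salary units reported by the GraphQL endpoint ("HOUR", "DAY", "WEEK", "MONTH", "YEAR") differ from the CompensationInterval member names, hence get_correct_interval. A quick check, assuming the package is importable as jobspy per the src layout above:

```python
from jobspy.jobs import CompensationInterval
from jobspy.scrapers.indeed import IndeedScraper

# unitOfWork values are normalized onto the enum; matching is case-insensitive
assert IndeedScraper.get_correct_interval("HOUR") is CompensationInterval.HOURLY
assert IndeedScraper.get_correct_interval("year") is CompensationInterval.YEARLY

try:
    IndeedScraper.get_correct_interval("FORTNIGHT")  # anything unmapped raises
except ValueError as err:
    print(err)  # Unsupported interval: FORTNIGHT
```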
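
End to end, the change means company_url and compensation for Indeed results now come from the batched per-jobkey detail lookup rather than the search snippet. A smoke test along the lines of the README example (the selected column names are illustrative of how JobPost fields are flattened into the returned DataFrame):

```python
from jobspy import scrape_jobs

jobs = scrape_jobs(
    site_name=["indeed"],
    search_term="software engineer",
    location="Dallas, TX",
    results_wanted=10,
    country_indeed="USA",
)
# company_url is now populated from employer.relativeCompanyPageUrl, and
# min_amount/max_amount accept floats per the Compensation model change
print(jobs[["title", "company", "company_url", "min_amount", "max_amount"]].head())
```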