From a64acf9c86bfb619d95806ef14fd3059622f218f Mon Sep 17 00:00:00 2001 From: Cullen Watson Date: Fri, 25 Aug 2023 21:17:36 -0500 Subject: [PATCH 1/5] fix(ziprecruiter): get full description --- api/core/scrapers/indeed/__init__.py | 8 +++-- api/core/scrapers/ziprecruiter/__init__.py | 35 +++++++++++++++++++--- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/api/core/scrapers/indeed/__init__.py b/api/core/scrapers/indeed/__init__.py index b8c6429..f04432a 100644 --- a/api/core/scrapers/indeed/__init__.py +++ b/api/core/scrapers/indeed/__init__.py @@ -110,11 +110,15 @@ class IndeedScraper(Scraper): job_type = IndeedScraper.get_job_type(job) timestamp_seconds = job["pubDate"] / 1000 date_posted = datetime.fromtimestamp(timestamp_seconds) - + li_elements = snippet_html.find_all("li") + if li_elements: + description = " ".join(li.text for li in li_elements) + else: + description = None first_li = snippet_html.find("li") job_post = JobPost( title=job["normTitle"], - description=first_li.text if first_li else None, + description=description, company_name=job["company"], location=Location( city=job.get("jobLocationCity"), diff --git a/api/core/scrapers/ziprecruiter/__init__.py b/api/core/scrapers/ziprecruiter/__init__.py index 837d237..29b1728 100644 --- a/api/core/scrapers/ziprecruiter/__init__.py +++ b/api/core/scrapers/ziprecruiter/__init__.py @@ -14,6 +14,8 @@ import math class ZipRecruiterScraper(Scraper): + url = "https://www.ziprecruiter.com" + def __init__(self): """ Initializes LinkedInScraper with the ZipRecruiter job search url @@ -21,7 +23,6 @@ class ZipRecruiterScraper(Scraper): site = Site(Site.ZIP_RECRUITER) super().__init__(site) - self.url = "https://www.ziprecruiter.com/jobs-search" self.jobs_per_page = 20 self.seen_urls = set() @@ -61,7 +62,9 @@ class ZipRecruiterScraper(Scraper): } response = session.get( - self.url, headers=ZipRecruiterScraper.headers(), params=params + self.url + "/jobs-search", + headers=ZipRecruiterScraper.headers(), 
+ params=params, ) if response.status_code != status.HTTP_200_OK: @@ -69,6 +72,7 @@ class ZipRecruiterScraper(Scraper): html_string = response.content soup = BeautifulSoup(html_string, "html.parser") + if page == 1: script_tag = soup.find("script", {"id": "js_variables"}) data = json.loads(script_tag.string) @@ -86,9 +90,12 @@ class ZipRecruiterScraper(Scraper): title = job.find("h2", {"class": "title"}).text company = job.find("a", {"class": "company_name"}).text.strip() - description = job.find("p", {"class": "job_snippet"}).text.strip() - job_type_element = job.find("li", {"class": "perk_item perk_type"}) + description, job_url = ZipRecruiterScraper.get_description(job_url, session) + if description is None: + description = job.find("p", {"class": "job_snippet"}).text.strip() + + job_type_element = job.find("li", {"class": "perk_item perk_type"}) if job_type_element: job_type_text = ( job_type_element.text.strip() @@ -163,6 +170,26 @@ class ZipRecruiterScraper(Scraper): ) return job_response + @classmethod + def get_description(cls, job_page_url: str, session: tls_client.Session) -> str: + """ + Retrieves job description by going to the job page url + :param job_page_url: + :param session: + :return: description or None + """ + response = session.get( + job_page_url, headers=ZipRecruiterScraper.headers(), allow_redirects=True + ) + + html_string = response.content + soup_job = BeautifulSoup(html_string, "html.parser") + + job_description_div = soup_job.find("div", {"class": "job_description"}) + if job_description_div: + return job_description_div.text.strip("\n"), response.url + return None, response.url + @staticmethod def get_interval(interval_str: str): """ From 8b04508b15b881f9bd16eba7690a1e12754b41fc Mon Sep 17 00:00:00 2001 From: Cullen Watson Date: Fri, 25 Aug 2023 21:22:46 -0500 Subject: [PATCH 2/5] fix: return type for get_description --- api/core/scrapers/ziprecruiter/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/api/core/scrapers/ziprecruiter/__init__.py b/api/core/scrapers/ziprecruiter/__init__.py index 29b1728..6ac42e5 100644 --- a/api/core/scrapers/ziprecruiter/__init__.py +++ b/api/core/scrapers/ziprecruiter/__init__.py @@ -171,12 +171,12 @@ class ZipRecruiterScraper(Scraper): return job_response @classmethod - def get_description(cls, job_page_url: str, session: tls_client.Session) -> str: + def get_description(cls, job_page_url: str, session: tls_client.Session) -> Tuple[Optional[str], str]: """ Retrieves job description by going to the job page url :param job_page_url: :param session: - :return: description or None + :return: description or None, response url """ response = session.get( job_page_url, headers=ZipRecruiterScraper.headers(), allow_redirects=True From eb728a572a083336062f4dce8ffaffe45357b689 Mon Sep 17 00:00:00 2001 From: Cullen Watson Date: Sat, 26 Aug 2023 04:34:02 -0500 Subject: [PATCH 3/5] feat(ziprecruiter): Add multithreading for individual job handling --- api/core/scrapers/ziprecruiter/__init__.py | 24 +++++++++++++++++----- 1 file changed, 19 insertions(+), 5 deletions(-) diff --git a/api/core/scrapers/ziprecruiter/__init__.py b/api/core/scrapers/ziprecruiter/__init__.py index 6ac42e5..695d700 100644 --- a/api/core/scrapers/ziprecruiter/__init__.py +++ b/api/core/scrapers/ziprecruiter/__init__.py @@ -83,10 +83,15 @@ class ZipRecruiterScraper(Scraper): job_posts = soup.find_all("div", {"class": "job_content"}) - for job in job_posts: + def process_job(job: Tag) -> Optional[JobPost]: + ''' + Parses a job from the job content tag + :param job: BeautifulSoup Tag for one job post + :return JobPost + ''' job_url = job.find("a", {"class": "job_link"})["href"] if job_url in self.seen_urls: - continue + return None title = job.find("h2", {"class": "title"}).text company = job.find("a", {"class": "company_name"}).text.strip() @@ -121,7 +126,14 @@ class ZipRecruiterScraper(Scraper): date_posted=date_posted, job_url=job_url, ) - job_list.append(job_post) 
+ return job_post + + with ThreadPoolExecutor(max_workers=10) as executor: + job_results: list[Future] = [ + executor.submit(process_job, job) for job in job_posts + ] + + job_list = [result.result() for result in job_results if result.result()] return job_list, job_count @@ -171,12 +183,14 @@ class ZipRecruiterScraper(Scraper): return job_response @classmethod - def get_description(cls, job_page_url: str, session: tls_client.Session) -> Tuple[Optional[str], str]: + def get_description( + cls, job_page_url: str, session: tls_client.Session + ) -> Tuple[Optional[str], str]: """ Retrieves job description by going to the job page url :param job_page_url: :param session: - :return: description or None, response url + :return: description or None, response url """ response = session.get( job_page_url, headers=ZipRecruiterScraper.headers(), allow_redirects=True From b4b836ff713a1c27b92f25a0ddc5f35af5f5ce6e Mon Sep 17 00:00:00 2001 From: Cullen Watson Date: Sat, 26 Aug 2023 05:55:59 -0500 Subject: [PATCH 4/5] fix(indeed): fetch full description --- api/core/scrapers/__init__.py | 3 +- api/core/scrapers/indeed/__init__.py | 46 ++++++++++++++++------ api/core/scrapers/linkedin/__init__.py | 12 +++--- api/core/scrapers/ziprecruiter/__init__.py | 12 +++--- 4 files changed, 48 insertions(+), 25 deletions(-) diff --git a/api/core/scrapers/__init__.py b/api/core/scrapers/__init__.py index 35de67b..401f9bc 100644 --- a/api/core/scrapers/__init__.py +++ b/api/core/scrapers/__init__.py @@ -26,8 +26,9 @@ class ScraperInput(BaseModel): class Scraper: - def __init__(self, site: Site): + def __init__(self, site: Site, url: str): self.site = site + self.url = url def scrape(self, scraper_input: ScraperInput) -> JobResponse: ... 
diff --git a/api/core/scrapers/indeed/__init__.py b/api/core/scrapers/indeed/__init__.py index f04432a..4bfaa26 100644 --- a/api/core/scrapers/indeed/__init__.py +++ b/api/core/scrapers/indeed/__init__.py @@ -3,6 +3,7 @@ import json from typing import Optional, Tuple, List import tls_client +import urllib.parse from bs4 import BeautifulSoup from bs4.element import Tag from fastapi import status @@ -25,9 +26,8 @@ class IndeedScraper(Scraper): Initializes IndeedScraper with the Indeed job search url """ site = Site(Site.INDEED) - super().__init__(site) - self.url = "https://www.indeed.com/jobs" - self.job_url = "https://www.indeed.com/viewjob?jk=" + url = "https://www.indeed.com" + super().__init__(site, url) self.jobs_per_page = 15 self.seen_urls = set() @@ -60,7 +60,7 @@ class IndeedScraper(Scraper): if sc_values: params["sc"] = "0kf:" + "".join(sc_values) + ";" - response = session.get(self.url, params=params) + response = session.get(self.url + "/jobs", params=params) if ( response.status_code != status.HTTP_200_OK @@ -82,10 +82,10 @@ class IndeedScraper(Scraper): ): raise Exception("No jobs found.") - for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]: - job_url = f'{self.job_url}{job["jobkey"]}' + def process_job(job) -> Optional[JobPost]: + job_url = f'{self.url}/jobs/viewjob?jk={job["jobkey"]}' if job_url in self.seen_urls: - continue + return None snippet_html = BeautifulSoup(job["snippet"], "html.parser") @@ -110,11 +110,8 @@ class IndeedScraper(Scraper): job_type = IndeedScraper.get_job_type(job) timestamp_seconds = job["pubDate"] / 1000 date_posted = datetime.fromtimestamp(timestamp_seconds) - li_elements = snippet_html.find_all("li") - if li_elements: - description = " ".join(li.text for li in li_elements) - else: - description = None + description = self.get_description(job_url, session) + first_li = snippet_html.find("li") job_post = JobPost( title=job["normTitle"], @@ -131,6 +128,10 @@ class IndeedScraper(Scraper): 
date_posted=date_posted, job_url=job_url, ) + return job_post + + for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]: + job_post = process_job(job) job_list.append(job_post) return job_list, total_num_jobs @@ -190,6 +191,27 @@ class IndeedScraper(Scraper): ) return job_response + def get_description(self, job_page_url: str, session: tls_client.Session) -> str: + """ + Retrieves job description by going to the job page url + :param job_page_url: + :param session: + :return: description + """ + parsed_url = urllib.parse.urlparse(job_page_url) + params = urllib.parse.parse_qs(parsed_url.query) + jk_value = params.get("jk", [None])[0] + formatted_url = f"{self.url}/viewjob?jk={jk_value}&spa=1" + + response = session.get(formatted_url, allow_redirects=True) + + raw_description = response.json()["body"]["jobInfoWrapperModel"][ + "jobInfoModel" + ]["sanitizedJobDescription"] + soup = BeautifulSoup(raw_description, "html.parser") + text_content = " ".join(soup.get_text().split()).strip() + return text_content + @staticmethod def get_job_type(job: dict) -> Optional[JobType]: """ diff --git a/api/core/scrapers/linkedin/__init__.py b/api/core/scrapers/linkedin/__init__.py index c0deb78..7e12143 100644 --- a/api/core/scrapers/linkedin/__init__.py +++ b/api/core/scrapers/linkedin/__init__.py @@ -15,10 +15,8 @@ class LinkedInScraper(Scraper): Initializes LinkedInScraper with the LinkedIn job search url """ site = Site(Site.LINKEDIN) - super().__init__(site) - - self.url = "https://www.linkedin.com/jobs/search/" - self.job_url = "https://www.linkedin.com/jobs/view/" + url = "https://www.linkedin.com" + super().__init__(site, url) def scrape(self, scraper_input: ScraperInput) -> JobResponse: """ @@ -57,7 +55,9 @@ class LinkedInScraper(Scraper): params = {k: v for k, v in params.items() if v is not None} print(params) - response = session.get(self.url, params=params, allow_redirects=True) + response = session.get( + f"{self.url}/jobs/search", params=params, 
allow_redirects=True + ) if response.status_code != status.HTTP_200_OK: return JobResponse( @@ -82,7 +82,7 @@ class LinkedInScraper(Scraper): job_id = ( data_entity_urn.split(":")[-1] if data_entity_urn else "N/A" ) - job_url = f"{self.job_url}{job_id}" + job_url = f"{self.url}/jobs/view/{job_id}" if job_url in seen_urls: continue seen_urls.add(job_url) diff --git a/api/core/scrapers/ziprecruiter/__init__.py b/api/core/scrapers/ziprecruiter/__init__.py index 695d700..0bedc03 100644 --- a/api/core/scrapers/ziprecruiter/__init__.py +++ b/api/core/scrapers/ziprecruiter/__init__.py @@ -5,6 +5,7 @@ from urllib.parse import urlparse, parse_qs import tls_client from fastapi import status from bs4 import BeautifulSoup +from bs4.element import Tag from concurrent.futures import ThreadPoolExecutor, Future from api.core.jobs import JobPost @@ -14,14 +15,13 @@ import math class ZipRecruiterScraper(Scraper): - url = "https://www.ziprecruiter.com" - def __init__(self): """ Initializes LinkedInScraper with the ZipRecruiter job search url """ site = Site(Site.ZIP_RECRUITER) - super().__init__(site) + url = "https://www.ziprecruiter.com" + super().__init__(site, url) self.jobs_per_page = 20 self.seen_urls = set() @@ -84,11 +84,11 @@ class ZipRecruiterScraper(Scraper): job_posts = soup.find_all("div", {"class": "job_content"}) def process_job(job: Tag) -> Optional[JobPost]: - ''' + """ Parses a job from the job content tag :param job: BeautifulSoup Tag for one job post :return JobPost - ''' + """ job_url = job.find("a", {"class": "job_link"})["href"] if job_url in self.seen_urls: return None @@ -201,7 +201,7 @@ class ZipRecruiterScraper(Scraper): job_description_div = soup_job.find("div", {"class": "job_description"}) if job_description_div: - return job_description_div.text.strip("\n"), response.url + return job_description_div.text.strip(), response.url return None, response.url @staticmethod From fe77c2a1f3813eb9bbb4f02453f20205b3cd179a Mon Sep 17 00:00:00 2001 From: Cullen 
Watson Date: Sat, 26 Aug 2023 07:07:29 -0500 Subject: [PATCH 5/5] fix(linkedin): fetch full description --- api/core/scrapers/indeed/__init__.py | 7 +++++++ api/core/scrapers/linkedin/__init__.py | 24 +++++++++++++++++++++- api/core/scrapers/ziprecruiter/__init__.py | 6 ++++-- 3 files changed, 34 insertions(+), 3 deletions(-) diff --git a/api/core/scrapers/indeed/__init__.py b/api/core/scrapers/indeed/__init__.py index 4bfaa26..4826081 100644 --- a/api/core/scrapers/indeed/__init__.py +++ b/api/core/scrapers/indeed/__init__.py @@ -110,7 +110,11 @@ class IndeedScraper(Scraper): job_type = IndeedScraper.get_job_type(job) timestamp_seconds = job["pubDate"] / 1000 date_posted = datetime.fromtimestamp(timestamp_seconds) + description = self.get_description(job_url, session) + li_elements = snippet_html.find_all("li") + if description is None and li_elements: + description = " ".join(li.text for li in li_elements) first_li = snippet_html.find("li") job_post = JobPost( @@ -205,6 +209,9 @@ class IndeedScraper(Scraper): response = session.get(formatted_url, allow_redirects=True) + if response.status_code not in range(200, 400): + return None + raw_description = response.json()["body"]["jobInfoWrapperModel"][ "jobInfoModel" ]["sanitizedJobDescription"] diff --git a/api/core/scrapers/linkedin/__init__.py b/api/core/scrapers/linkedin/__init__.py index 7e12143..c7019ad 100644 --- a/api/core/scrapers/linkedin/__init__.py +++ b/api/core/scrapers/linkedin/__init__.py @@ -54,7 +54,6 @@ class LinkedInScraper(Scraper): } params = {k: v for k, v in params.items() if v is not None} - print(params) response = session.get( f"{self.url}/jobs/search", params=params, allow_redirects=True ) @@ -103,6 +102,7 @@ class LinkedInScraper(Scraper): datetime_tag = metadata_card.find( "time", class_="job-search-card__listdate" ) + description = LinkedInScraper.get_description(job_url) if datetime_tag: datetime_str = datetime_tag["datetime"] date_posted = datetime.strptime(datetime_str, "%Y-%m-%d") @@ 
-111,6 +111,7 @@ class LinkedInScraper(Scraper): job_post = JobPost( title=title, + description=description, company_name=company, location=location, date_posted=date_posted, @@ -138,6 +139,27 @@ class LinkedInScraper(Scraper): ) return job_response + @staticmethod + def get_description(job_page_url: str) -> Optional[str]: + """ + Retrieves job description by going to the job page url + :param job_page_url: + :return: description or None + """ + response = requests.get(job_page_url, allow_redirects=True) + if response.status_code not in range(200, 400): + return None + + soup = BeautifulSoup(response.text, "html.parser") + div_content = soup.find( + "div", class_=lambda x: x and "show-more-less-html__markup" in x + ) + + text_content = None + if div_content: + text_content = " ".join(div_content.get_text().split()).strip() + return text_content + + @staticmethod + def get_location(metadata_card: Optional[Tag]) -> Location: + """ diff --git a/api/core/scrapers/ziprecruiter/__init__.py b/api/core/scrapers/ziprecruiter/__init__.py index 0bedc03..7180fb3 100644 --- a/api/core/scrapers/ziprecruiter/__init__.py +++ b/api/core/scrapers/ziprecruiter/__init__.py @@ -182,9 +182,9 @@ class ZipRecruiterScraper(Scraper): ) return job_response - @classmethod + @staticmethod def get_description( - cls, job_page_url: str, session: tls_client.Session + job_page_url: str, session: tls_client.Session ) -> Tuple[Optional[str], str]: """ Retrieves job description by going to the job page url @@ -195,6 +195,8 @@ class ZipRecruiterScraper(Scraper): response = session.get( job_page_url, headers=ZipRecruiterScraper.headers(), allow_redirects=True ) + if response.status_code not in range(200, 400): + return None, response.url html_string = response.content soup_job = BeautifulSoup(html_string, "html.parser")