refactor(JobResponse): place result count near top of response

pull/12/head
Cullen Watson 2023-07-11 12:24:04 -05:00
parent 804646d91b
commit 05b54190a0
3 changed files with 26 additions and 36 deletions

@@ -57,15 +57,13 @@ class JobResponse(BaseModel):
     success: bool
     error: str = None
-    jobs: list[JobPost] = []
     total_results: int = None
     returned_results: int = None
+    jobs: list[JobPost] = []
 
     @validator("returned_results")
     def set_returned_results(cls, v, values):
         if v is None and values.get("jobs"):
             return len(values["jobs"])
         return v
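
Worth flagging on this reorder: under pydantic v1 (where the values dict passed to a @validator only contains fields declared before the one being validated), set_returned_results can no longer see jobs once jobs is declared last. A minimal standalone sketch of that behavior, with jobs typed loosely instead of list[JobPost]:

from pydantic import BaseModel, validator

class JobResponse(BaseModel):
    success: bool
    error: str = None
    total_results: int = None
    returned_results: int = None
    jobs: list = []  # declared after returned_results, as in this commit

    @validator("returned_results")
    def set_returned_results(cls, v, values):
        # values holds only success/error/total_results here; "jobs" is absent
        if v is None and values.get("jobs"):
            return len(values["jobs"])
        return v

print(JobResponse(success=True, jobs=[1, 2]).returned_results)  # None, not 2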

@@ -33,12 +33,8 @@ class IndeedScraper(Scraper):
         self.seen_urls = set()
 
     def scrape_page(
-        self,
-        scraper_input: ScraperInput,
-        page: int,
-        session: tls_client.Session
+        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
     ) -> tuple[list[JobPost], int]:
         """
         Scrapes a page of Indeed for jobs with scraper_input criteria
         :param scraper_input:
@@ -74,7 +70,9 @@ class IndeedScraper(Scraper):
         soup = BeautifulSoup(response.content, "html.parser")
 
-        jobs = IndeedScraper.parse_jobs(soup)  #: can raise exception, handled by main scrape function
+        jobs = IndeedScraper.parse_jobs(
+            soup
+        )  #: can raise exception, handled by main scrape function
         total_num_jobs = IndeedScraper.total_jobs(soup)
 
         if (
@@ -82,7 +80,7 @@ class IndeedScraper(Scraper):
             .get("mosaicProviderJobCardsModel", {})
             .get("results")
         ):
-            raise Exception('No jobs found.')
+            raise Exception("No jobs found.")
 
         for job in jobs["metaData"]["mosaicProviderJobCardsModel"]["results"]:
             job_url = f'{self.job_url}{job["jobkey"]}'
@@ -95,9 +93,7 @@ class IndeedScraper(Scraper):
             compensation = None
             if extracted_salary:
                 salary_snippet = job.get("salarySnippet")
-                currency = (
-                    salary_snippet.get("currency") if salary_snippet else None
-                )
+                currency = salary_snippet.get("currency") if salary_snippet else None
                 interval = (extracted_salary.get("type"),)
                 if isinstance(interval, tuple):
                     interval = interval[0]
@@ -145,7 +141,9 @@ class IndeedScraper(Scraper):
             client_identifier="chrome112", random_tls_extension_order=True
         )
 
-        pages_to_process = math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
+        pages_to_process = (
+            math.ceil(scraper_input.results_wanted / self.jobs_per_page) - 1
+        )
 
         try:
             #: get first page to initialize session
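
For intuition on the page arithmetic above: the first page is fetched up front to initialize the session, so only the remaining pages go to the thread pool. A quick sketch with illustrative numbers (the results_wanted and jobs_per_page values below are made up, not taken from this repo):

import math

results_wanted = 70  # illustrative
jobs_per_page = 15   # illustrative
pages_to_process = math.ceil(results_wanted / jobs_per_page) - 1
print(pages_to_process)  # 4 -> the pool gets pages 1..4; page 0 was already fetched
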
@@ -153,9 +151,8 @@ class IndeedScraper(Scraper):
             with ThreadPoolExecutor(max_workers=10) as executor:
                 futures: list[Future] = [
-                    executor.submit(
-                        self.scrape_page, scraper_input, page, session
-                    ) for page in range(1, pages_to_process + 1)
+                    executor.submit(self.scrape_page, scraper_input, page, session)
+                    for page in range(1, pages_to_process + 1)
                 ]
 
                 for future in futures:
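
The reformatted comprehension keeps the same fan-out pattern: submit one future per page, then drain them in submission order. A self-contained sketch of the pattern (scrape_page below is a stand-in worker, not the scraper's real method):

from concurrent.futures import Future, ThreadPoolExecutor

def scrape_page(page: int) -> list[str]:
    # stand-in for the real per-page scrape
    return [f"job-{page}-{i}" for i in range(3)]

pages_to_process = 4
with ThreadPoolExecutor(max_workers=10) as executor:
    futures: list[Future] = [
        executor.submit(scrape_page, page)
        for page in range(1, pages_to_process + 1)
    ]
    job_list = [job for future in futures for job in future.result()]

print(len(job_list))  # 12; iterating futures in order preserves page order
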
@@ -180,7 +177,7 @@ class IndeedScraper(Scraper):
         )
 
         if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[:scraper_input.results_wanted]
+            job_list = job_list[: scraper_input.results_wanted]
 
         job_response = JobResponse(
             success=True,

@@ -26,12 +26,8 @@ class ZipRecruiterScraper(Scraper):
         self.seen_urls = set()
 
     def scrape_page(
-        self,
-        scraper_input: ScraperInput,
-        page: int,
-        session: tls_client.Session
+        self, scraper_input: ScraperInput, page: int, session: tls_client.Session
     ) -> tuple[list[JobPost], int | None]:
         """
         Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
         :param scraper_input:
@@ -140,9 +136,8 @@ class ZipRecruiterScraper(Scraper):
             with ThreadPoolExecutor(max_workers=10) as executor:
                 futures: list[Future] = [
-                    executor.submit(
-                        self.scrape_page, scraper_input, page, session
-                    ) for page in range(2, pages_to_process + 1)
+                    executor.submit(self.scrape_page, scraper_input, page, session)
+                    for page in range(2, pages_to_process + 1)
                 ]
 
                 for future in futures:
@@ -159,7 +154,7 @@ class ZipRecruiterScraper(Scraper):
         #: note: this does not handle if the results are more or less than the results_wanted
         if len(job_list) > scraper_input.results_wanted:
-            job_list = job_list[:scraper_input.results_wanted]
+            job_list = job_list[: scraper_input.results_wanted]
 
         job_response = JobResponse(
             success=True,