Only keep Indeed and add location.json

pull/53/head
djv 2023-10-01 12:13:25 +08:00
parent 4ae48d83c4
commit 6d2cdcf813
11 changed files with 3869 additions and 832 deletions


@@ -1,17 +1,16 @@
+import json
+import os
 from jobspy import scrape_jobs
 import pandas as pd
 
-jobs: pd.DataFrame = scrape_jobs(
-    # site_name=["indeed", "linkedin", "zip_recruiter"],
-    site_name=["indeed"],
-    search_term="software engineer",
-    location="Dallas, TX",
-    results_wanted=20,  # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should work, though)
-    country_indeed='USA',
-    # offset=25  # start jobs from an offset (use if a search failed and you want to continue)
-    # proxy="http://34.120.172.140:8123",
-    proxy="http://crawler-gost-proxy.jobright-internal.com:8080",
-)
+# load the location list
+def read_location_list(location_file):
+    with open(location_file) as f:
+        location_list = [location['name'] for location in json.load(f)]
+    return location_list
 
 # formatting for pandas
 pd.set_option('display.max_columns', None)
@@ -19,15 +18,23 @@ pd.set_option('display.max_rows', None)
 pd.set_option('display.width', None)
 pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc
 
-# 1: output to console
-print(jobs)
-
-# 2: output to .csv
-jobs.to_csv('./jobs.csv', index=False)
-print('outputted to jobs.csv')
-
-# 3: output to .xlsx
-# jobs.to_xlsx('jobs.xlsx', index=False)
-
-# 4: display in Jupyter Notebook (1. pip install jupyter 2. jupyter notebook)
-# display(jobs)
+# fetch jobs for each location
+locations = read_location_list('location.json')
+for location in locations:
+    jobs: pd.DataFrame = scrape_jobs(
+        # site_name=["indeed", "linkedin", "zip_recruiter"],
+        site_name=["indeed"],
+        search_term="software engineer",
+        location=location,
+        results_wanted=30,
+        # be wary: the higher it is, the more likely you'll get blocked (a rotating proxy should work, though)
+        country_indeed='USA',
+        # offset=25  # start jobs from an offset (use if a search failed and you want to continue)
+        proxy="http://34.120.172.140:8123",
+        # proxy="http://crawler-gost-proxy.jobright-internal.com:8080",
+    )
+
+    if os.path.isfile('./jobs.csv'):
+        jobs.to_csv('./jobs.csv', index=False, mode='a', header=False)
+    else:
+        jobs.to_csv('./jobs.csv', index=False, mode='a', header=True)
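For readability, here is the new side of the two hunks above assembled into a single script. The commented-out alternatives are omitted and the blank-line spacing between the hunks is assumed; everything else comes straight from the diff:

import json
import os
from jobspy import scrape_jobs
import pandas as pd

# load the location list from the new examples/location.json
def read_location_list(location_file):
    with open(location_file) as f:
        location_list = [location['name'] for location in json.load(f)]
    return location_list

# formatting for pandas
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', 50)  # set to 0 to see full job url / desc

# fetch jobs for each location and append everything to one CSV
locations = read_location_list('location.json')
for location in locations:
    jobs: pd.DataFrame = scrape_jobs(
        site_name=["indeed"],
        search_term="software engineer",
        location=location,
        results_wanted=30,
        country_indeed='USA',
        proxy="http://34.120.172.140:8123",
    )

    # write the header only for the first location, then append
    if os.path.isfile('./jobs.csv'):
        jobs.to_csv('./jobs.csv', index=False, mode='a', header=False)
    else:
        jobs.to_csv('./jobs.csv', index=False, mode='a', header=True)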

examples/location.json (new file, 3,838 lines added)

File diff suppressed because it is too large.
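The contents are collapsed here, but read_location_list() in the example above indexes each entry as location['name'], so the file is presumably a JSON array of objects that each carry at least a name field. A minimal hypothetical sample (the real file is about 3,800 lines long):

[
  {"name": "Dallas, TX"},
  {"name": "Austin, TX"}
]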


@@ -5,19 +5,11 @@ from typing import List, Tuple, Optional
 from .jobs import JobType, Location
 from .scrapers.indeed import IndeedScraper
-from .scrapers.ziprecruiter import ZipRecruiterScraper
-from .scrapers.linkedin import LinkedInScraper
 from .scrapers import ScraperInput, Site, JobResponse, Country
-from .scrapers.exceptions import (
-    LinkedInException,
-    IndeedException,
-    ZipRecruiterException,
-)
+from .scrapers.exceptions import IndeedException
 
 SCRAPER_MAPPING = {
-    Site.LINKEDIN: LinkedInScraper,
     Site.INDEED: IndeedScraper,
-    Site.ZIP_RECRUITER: ZipRecruiterScraper,
 }
@@ -81,16 +73,12 @@ def scrape_jobs(
     try:
         scraped_data: JobResponse = scraper.scrape(scraper_input)
-    except (LinkedInException, IndeedException, ZipRecruiterException) as lie:
+    except IndeedException as lie:
         raise lie
     except Exception as e:
         # unhandled exceptions
-        if site == Site.LINKEDIN:
-            raise LinkedInException()
         if site == Site.INDEED:
             raise IndeedException()
-        if site == Site.ZIP_RECRUITER:
-            raise ZipRecruiterException()
         else:
             raise e
 
     return site.value, scraped_data
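With the mapping trimmed to Indeed only, asking for any other site would presumably fail at the SCRAPER_MAPPING lookup with a bare KeyError. A minimal, hypothetical guard (not part of this diff; it assumes scrape_jobs resolves scrapers via SCRAPER_MAPPING[site]):

def get_scraper_class(site: Site):
    # only Site.INDEED remains in SCRAPER_MAPPING after this change
    try:
        return SCRAPER_MAPPING[site]
    except KeyError:
        raise ValueError(f"{site.value} is not supported in this fork; only Indeed is kept")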


@@ -6,13 +6,5 @@ This module contains the set of Scrapers' exceptions.
 """
 
 
-class LinkedInException(Exception):
-    """Failed to scrape LinkedIn"""
-
-
 class IndeedException(Exception):
     """Failed to scrape Indeed"""
-
-
-class ZipRecruiterException(Exception):
-    """Failed to scrape ZipRecruiter"""


@@ -1,271 +0,0 @@
"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape LinkedIn.
"""
from typing import Optional
from datetime import datetime
import requests
import time
import re
from requests.exceptions import ProxyError
from concurrent.futures import ThreadPoolExecutor, as_completed
from bs4 import BeautifulSoup
from bs4.element import Tag
from threading import Lock
from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ...jobs import (
JobPost,
Location,
JobResponse,
JobType,
)
def extract_emails_from_text(text: str) -> Optional[list[str]]:
if not text:
return None
email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
return email_regex.findall(text)
class LinkedInScraper(Scraper):
MAX_RETRIES = 3
DELAY = 10
def __init__(self, proxy: Optional[str] = None):
"""
Initializes LinkedInScraper with the LinkedIn job search url
"""
site = Site(Site.LINKEDIN)
self.country = "worldwide"
self.url = "https://www.linkedin.com"
super().__init__(site, proxy=proxy)
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
"""
Scrapes LinkedIn for jobs with scraper_input criteria
:param scraper_input:
:return: job_response
"""
job_list: list[JobPost] = []
seen_urls = set()
url_lock = Lock()
page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
def job_type_code(job_type_enum):
mapping = {
JobType.FULL_TIME: "F",
JobType.PART_TIME: "P",
JobType.INTERNSHIP: "I",
JobType.CONTRACT: "C",
JobType.TEMPORARY: "T",
}
return mapping.get(job_type_enum, "")
while len(job_list) < scraper_input.results_wanted and page < 1000:
params = {
"keywords": scraper_input.search_term,
"location": scraper_input.location,
"distance": scraper_input.distance,
"f_WT": 2 if scraper_input.is_remote else None,
"f_JT": job_type_code(scraper_input.job_type)
if scraper_input.job_type
else None,
"pageNum": 0,
page: page + scraper_input.offset,
"f_AL": "true" if scraper_input.easy_apply else None,
}
params = {k: v for k, v in params.items() if v is not None}
retries = 0
while retries < self.MAX_RETRIES:
try:
response = requests.get(
f"{self.url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
params=params,
allow_redirects=True,
proxies=self.proxy,
timeout=10,
)
response.raise_for_status()
break
except requests.HTTPError as e:
if hasattr(e, 'response') and e.response is not None:
if e.response.status_code == 429:
time.sleep(self.DELAY)
retries += 1
continue
else:
raise LinkedInException(f"bad response status code: {e.response.status_code}")
else:
raise
except ProxyError as e:
raise LinkedInException("bad proxy")
except Exception as e:
raise LinkedInException(str(e))
else:
# Raise an exception if the maximum number of retries is reached
raise LinkedInException("Max retries reached, failed to get a valid response")
soup = BeautifulSoup(response.text, "html.parser")
with ThreadPoolExecutor(max_workers=5) as executor:
futures = []
for job_card in soup.find_all("div", class_="base-search-card"):
job_url = None
href_tag = job_card.find("a", class_="base-card__full-link")
if href_tag and "href" in href_tag.attrs:
href = href_tag.attrs["href"].split("?")[0]
job_id = href.split("-")[-1]
job_url = f"{self.url}/jobs/view/{job_id}"
with url_lock:
if job_url in seen_urls:
continue
seen_urls.add(job_url)
futures.append(executor.submit(self.process_job, job_card, job_url))
for future in as_completed(futures):
try:
job_post = future.result()
if job_post:
job_list.append(job_post)
except Exception as e:
raise LinkedInException("Exception occurred while processing jobs")
page += 25
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list)
def process_job(self, job_card: Tag, job_url: str) -> Optional[JobPost]:
title_tag = job_card.find("span", class_="sr-only")
title = title_tag.get_text(strip=True) if title_tag else "N/A"
company_tag = job_card.find("h4", class_="base-search-card__subtitle")
company_a_tag = company_tag.find("a") if company_tag else None
company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"
metadata_card = job_card.find("div", class_="base-search-card__metadata")
location = self.get_location(metadata_card)
datetime_tag = metadata_card.find("time", class_="job-search-card__listdate") if metadata_card else None
date_posted = None
if datetime_tag and "datetime" in datetime_tag.attrs:
datetime_str = datetime_tag["datetime"]
try:
date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
except Exception as e:
date_posted = None
benefits_tag = job_card.find("span", class_="result-benefits__text")
benefits = " ".join(benefits_tag.get_text().split()) if benefits_tag else None
description, job_type = self.get_job_description(job_url)
return JobPost(
title=title,
description=description,
company_name=company,
location=location,
date_posted=date_posted,
job_url=job_url,
job_type=job_type,
benefits=benefits,
emails=extract_emails_from_text(description)
)
def get_job_description(self, job_page_url: str) -> tuple[None, None] | tuple[
str | None, tuple[str | None, JobType | None]]:
"""
Retrieves job description by going to the job page url
:param job_page_url:
:return: description or None
"""
try:
response = requests.get(job_page_url, timeout=5, proxies=self.proxy)
response.raise_for_status()
except Exception as e:
return None, None
soup = BeautifulSoup(response.text, "html.parser")
div_content = soup.find(
"div", class_=lambda x: x and "show-more-less-html__markup" in x
)
description = None
if div_content:
description = " ".join(div_content.get_text().split()).strip()
def get_job_type(
soup_job_type: BeautifulSoup,
) -> JobType | None:
"""
Gets the job type from job page
:param soup_job_type:
:return: JobType
"""
h3_tag = soup_job_type.find(
"h3",
class_="description__job-criteria-subheader",
string=lambda text: "Employment type" in text,
)
employment_type = None
if h3_tag:
employment_type_span = h3_tag.find_next_sibling(
"span",
class_="description__job-criteria-text description__job-criteria-text--criteria",
)
if employment_type_span:
employment_type = employment_type_span.get_text(strip=True)
employment_type = employment_type.lower()
employment_type = employment_type.replace("-", "")
return LinkedInScraper.get_enum_from_value(employment_type)
return description, get_job_type(soup)
@staticmethod
def get_enum_from_value(value_str):
for job_type in JobType:
if value_str in job_type.value:
return job_type
return None
def get_location(self, metadata_card: Optional[Tag]) -> Location:
"""
Extracts the location data from the job metadata card.
:param metadata_card
:return: location
"""
location = Location(country=self.country)
if metadata_card is not None:
location_tag = metadata_card.find(
"span", class_="job-search-card__location"
)
location_string = location_tag.text.strip() if location_tag else "N/A"
parts = location_string.split(", ")
if len(parts) == 2:
city, state = parts
location = Location(
city=city,
state=state,
country=self.country,
)
return location
def extract_emails_from_text(text: str) -> Optional[list[str]]:
if not text:
return None
email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
return email_regex.findall(text)


@@ -1,474 +0,0 @@
"""
jobspy.scrapers.ziprecruiter
~~~~~~~~~~~~~~~~~~~
This module contains routines to scrape ZipRecruiter.
"""
import math
import json
import re
from datetime import datetime, date
from typing import Optional, Tuple, Any
from urllib.parse import urlparse, parse_qs, urlunparse
import tls_client
import requests
from bs4 import BeautifulSoup
from bs4.element import Tag
from concurrent.futures import ThreadPoolExecutor, Future
from .. import Scraper, ScraperInput, Site
from ..exceptions import ZipRecruiterException
from ...jobs import (
JobPost,
Compensation,
CompensationInterval,
Location,
JobResponse,
JobType,
Country,
)
def extract_emails_from_text(text: str) -> Optional[list[str]]:
if not text:
return None
email_regex = re.compile(r"[a-zA-Z0-9._%+-]+@[a-zA-Z0-9.-]+\.[a-zA-Z]{2,}")
return email_regex.findall(text)
class ZipRecruiterScraper(Scraper):
def __init__(self, proxy: Optional[str] = None):
"""
Initializes ZipRecruiterScraper with the ZipRecruiter job search url
"""
site = Site(Site.ZIP_RECRUITER)
self.url = "https://www.ziprecruiter.com"
super().__init__(site, proxy=proxy)
self.jobs_per_page = 20
self.seen_urls = set()
self.session = tls_client.Session(
client_identifier="chrome112", random_tls_extension_order=True
)
def find_jobs_in_page(
self, scraper_input: ScraperInput, page: int
) -> list[JobPost]:
"""
Scrapes a page of ZipRecruiter for jobs with scraper_input criteria
:param scraper_input:
:param page:
:return: jobs found on page
"""
job_list: list[JobPost] = []
try:
response = self.session.get(
f"{self.url}/jobs-search",
headers=ZipRecruiterScraper.headers(),
params=ZipRecruiterScraper.add_params(scraper_input, page),
allow_redirects=True,
proxy=self.proxy,
timeout_seconds=10,
)
if response.status_code != 200:
raise ZipRecruiterException(
f"bad response status code: {response.status_code}"
)
except Exception as e:
if "Proxy responded with non 200 code" in str(e):
raise ZipRecruiterException("bad proxy")
raise ZipRecruiterException(str(e))
else:
soup = BeautifulSoup(response.text, "html.parser")
js_tag = soup.find("script", {"id": "js_variables"})
if js_tag:
page_json = json.loads(js_tag.string)
jobs_list = page_json.get("jobList")
if jobs_list:
page_variant = "javascript"
# print('type javascript', len(jobs_list))
else:
page_variant = "html_2"
jobs_list = soup.find_all("div", {"class": "job_content"})
# print('type 2 html', len(jobs_list))
else:
page_variant = "html_1"
jobs_list = soup.find_all("li", {"class": "job-listing"})
# print('type 1 html', len(jobs_list))
with ThreadPoolExecutor(max_workers=10) as executor:
if page_variant == "javascript":
job_results = [
executor.submit(self.process_job_javascript, job)
for job in jobs_list
]
elif page_variant == "html_1":
job_results = [
executor.submit(self.process_job_html_1, job) for job in jobs_list
]
elif page_variant == "html_2":
job_results = [
executor.submit(self.process_job_html_2, job) for job in jobs_list
]
job_list = [result.result() for result in job_results if result.result()]
return job_list
def scrape(self, scraper_input: ScraperInput) -> JobResponse:
"""
Scrapes ZipRecruiter for jobs with scraper_input criteria
:param scraper_input:
:return: job_response
"""
start_page = (scraper_input.offset // self.jobs_per_page) + 1 if scraper_input.offset else 1
#: get first page to initialize session
job_list: list[JobPost] = self.find_jobs_in_page(scraper_input, start_page)
pages_to_process = max(
3, math.ceil(scraper_input.results_wanted / self.jobs_per_page)
)
with ThreadPoolExecutor(max_workers=10) as executor:
futures: list[Future] = [
executor.submit(self.find_jobs_in_page, scraper_input, page)
for page in range(start_page + 1, start_page + pages_to_process + 2)
]
for future in futures:
jobs = future.result()
job_list += jobs
job_list = job_list[: scraper_input.results_wanted]
return JobResponse(jobs=job_list)
def process_job_html_1(self, job: Tag) -> Optional[JobPost]:
"""
Parses a job from the job content tag
:param job: BeautifulSoup Tag for one job post
:return JobPost
TODO: this method isn't finished due to not encountering this type of html often
"""
job_url = self.cleanurl(job.find("a", {"class": "job_link"})["href"])
if job_url in self.seen_urls:
return None
title = job.find("h2", {"class": "title"}).text
company = job.find("a", {"class": "company_name"}).text.strip()
description, updated_job_url = self.get_description(job_url)
# job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = job.find("p", {"class": "job_snippet"}).text.strip()
job_type_element = job.find("li", {"class": "perk_item perk_type"})
job_type = None
if job_type_element:
job_type_text = (
job_type_element.text.strip().lower().replace("_", "").replace(" ", "")
)
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
date_posted = ZipRecruiterScraper.get_date_posted(job)
job_post = JobPost(
title=title,
description=description,
company_name=company,
location=ZipRecruiterScraper.get_location(job),
job_type=job_type,
compensation=ZipRecruiterScraper.get_compensation(job),
date_posted=date_posted,
job_url=job_url,
emails=extract_emails_from_text(description),
)
return job_post
def process_job_html_2(self, job: Tag) -> Optional[JobPost]:
"""
Parses a job from the job content tag for a second variant of HTML that ZR uses
:param job: BeautifulSoup Tag for one job post
:return JobPost
"""
job_url = self.cleanurl(job.find("a", class_="job_link")["href"])
title = job.find("h2", class_="title").text
company = job.find("a", class_="company_name").text.strip()
description, updated_job_url = self.get_description(job_url)
# job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = job.find("p", class_="job_snippet").get_text().strip()
job_type_text = job.find("li", class_="perk_item perk_type")
job_type = None
if job_type_text:
job_type_text = (
job_type_text.get_text()
.strip()
.lower()
.replace("-", "")
.replace(" ", "")
)
job_type = ZipRecruiterScraper.get_job_type_enum(job_type_text)
date_posted = ZipRecruiterScraper.get_date_posted(job)
job_post = JobPost(
title=title,
description=description,
company_name=company,
location=ZipRecruiterScraper.get_location(job),
job_type=job_type,
compensation=ZipRecruiterScraper.get_compensation(job),
date_posted=date_posted,
job_url=job_url,
)
return job_post
def process_job_javascript(self, job: dict) -> JobPost:
title = job.get("Title")
job_url = self.cleanurl(job.get("JobURL"))
description, updated_job_url = self.get_description(job_url)
# job_url = updated_job_url if updated_job_url else job_url
if description is None:
description = BeautifulSoup(
job.get("Snippet", "").strip(), "html.parser"
).get_text()
company = job.get("OrgName")
location = Location(
city=job.get("City"), state=job.get("State"), country=Country.US_CANADA
)
job_type = ZipRecruiterScraper.get_job_type_enum(
job.get("EmploymentType", "").replace("-", "").lower()
)
formatted_salary = job.get("FormattedSalaryShort", "")
salary_parts = formatted_salary.split(" ")
min_salary_str = salary_parts[0][1:].replace(",", "")
if "." in min_salary_str:
min_amount = int(float(min_salary_str) * 1000)
else:
min_amount = int(min_salary_str.replace("K", "000"))
if len(salary_parts) >= 3 and salary_parts[2].startswith("$"):
max_salary_str = salary_parts[2][1:].replace(",", "")
if "." in max_salary_str:
max_amount = int(float(max_salary_str) * 1000)
else:
max_amount = int(max_salary_str.replace("K", "000"))
else:
max_amount = 0
compensation = Compensation(
interval=CompensationInterval.YEARLY,
min_amount=min_amount,
max_amount=max_amount,
currency="USD/CAD",
)
save_job_url = job.get("SaveJobURL", "")
posted_time_match = re.search(
r"posted_time=(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}Z)", save_job_url
)
if posted_time_match:
date_time_str = posted_time_match.group(1)
date_posted_obj = datetime.strptime(date_time_str, "%Y-%m-%dT%H:%M:%SZ")
date_posted = date_posted_obj.date()
else:
date_posted = date.today()
return JobPost(
title=title,
description=description,
company_name=company,
location=location,
job_type=job_type,
compensation=compensation,
date_posted=date_posted,
job_url=job_url,
)
return job_post
@staticmethod
def get_job_type_enum(job_type_str: str) -> Optional[JobType]:
for job_type in JobType:
if job_type_str in job_type.value:
a = True
return job_type
return None
def get_description(self, job_page_url: str) -> Tuple[Optional[str], Optional[str]]:
"""
Retrieves job description by going to the job page url
:param job_page_url:
:param session:
:return: description or None, response url
"""
try:
response = requests.get(
job_page_url,
headers=ZipRecruiterScraper.headers(),
allow_redirects=True,
timeout=5,
proxies=self.proxy,
)
if response.status_code not in range(200, 400):
return None, None
except Exception as e:
return None, None
html_string = response.content
soup_job = BeautifulSoup(html_string, "html.parser")
job_description_div = soup_job.find("div", {"class": "job_description"})
if job_description_div:
return job_description_div.text.strip(), response.url
return None, response.url
@staticmethod
def add_params(scraper_input, page) -> dict[str, str | Any]:
params = {
"search": scraper_input.search_term,
"location": scraper_input.location,
"page": page,
"form": "jobs-landing",
}
job_type_value = None
if scraper_input.job_type:
if scraper_input.job_type.value == "fulltime":
job_type_value = "full_time"
elif scraper_input.job_type.value == "parttime":
job_type_value = "part_time"
else:
job_type_value = scraper_input.job_type.value
if job_type_value:
params[
"refine_by_employment"
] = f"employment_type:employment_type:{job_type_value}"
if scraper_input.is_remote:
params["refine_by_location_type"] = "only_remote"
if scraper_input.distance:
params["radius"] = scraper_input.distance
return params
@staticmethod
def get_interval(interval_str: str):
"""
Maps the interval alias to its appropriate CompensationInterval.
:param interval_str
:return: CompensationInterval
"""
interval_alias = {"annually": CompensationInterval.YEARLY}
interval_str = interval_str.lower()
if interval_str in interval_alias:
return interval_alias[interval_str]
return CompensationInterval(interval_str)
@staticmethod
def get_date_posted(job: Tag) -> Optional[datetime.date]:
"""
Extracts the date a job was posted
:param job
:return: date the job was posted or None
"""
button = job.find(
"button", {"class": "action_input save_job zrs_btn_secondary_200"}
)
if not button:
return None
url_time = button.get("data-href", "")
url_components = urlparse(url_time)
params = parse_qs(url_components.query)
posted_time_str = params.get("posted_time", [None])[0]
if posted_time_str:
posted_date = datetime.strptime(
posted_time_str, "%Y-%m-%dT%H:%M:%SZ"
).date()
return posted_date
return None
@staticmethod
def get_compensation(job: Tag) -> Optional[Compensation]:
"""
Parses the compensation tag from the job BeautifulSoup object
:param job
:return: Compensation object or None
"""
pay_element = job.find("li", {"class": "perk_item perk_pay"})
if pay_element is None:
return None
pay = pay_element.find("div", {"class": "value"}).find("span").text.strip()
def create_compensation_object(pay_string: str) -> Compensation:
"""
Creates a Compensation object from a pay_string
:param pay_string
:return: compensation
"""
interval = ZipRecruiterScraper.get_interval(pay_string.split()[-1])
amounts = []
for amount in pay_string.split("to"):
amount = amount.replace(",", "").strip("$ ").split(" ")[0]
if "K" in amount:
amount = amount.replace("K", "")
amount = int(float(amount)) * 1000
else:
amount = int(float(amount))
amounts.append(amount)
compensation = Compensation(
interval=interval,
min_amount=min(amounts),
max_amount=max(amounts),
currency="USD/CAD",
)
return compensation
return create_compensation_object(pay)
@staticmethod
def get_location(job: Tag) -> Location:
"""
Extracts the job location from the BeautifulSoup object
:param job:
:return: location
"""
location_link = job.find("a", {"class": "company_location"})
if location_link is not None:
location_string = location_link.text.strip()
parts = location_string.split(", ")
if len(parts) == 2:
city, state = parts
else:
city, state = None, None
else:
city, state = None, None
return Location(city=city, state=state, country=Country.US_CANADA)
@staticmethod
def headers() -> dict:
"""
Returns headers needed for requests
:return: dict - Dictionary containing headers
"""
return {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36"
}
@staticmethod
def cleanurl(url):
parsed_url = urlparse(url)
return urlunparse((parsed_url.scheme, parsed_url.netloc, parsed_url.path, parsed_url.params, '', ''))


@@ -1,12 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd
def test_all():
result = scrape_jobs(
site_name=["linkedin", "indeed", "zip_recruiter"],
search_term="software engineer",
results_wanted=5,
)
assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"


@@ -1,10 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd
def test_indeed():
result = scrape_jobs(
site_name="indeed",
search_term="software engineer",
)
assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"


@@ -1,10 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd
def test_linkedin():
result = scrape_jobs(
site_name="linkedin",
search_term="software engineer",
)
assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"


@@ -1,11 +0,0 @@
from ..jobspy import scrape_jobs
import pandas as pd
def test_ziprecruiter():
result = scrape_jobs(
site_name="zip_recruiter",
search_term="software engineer",
)
assert isinstance(result, pd.DataFrame) and not result.empty, "Result should be a non-empty DataFrame"