"""
jobspy.scrapers.linkedin
~~~~~~~~~~~~~~~~~~~~~~~~

This module contains routines to scrape LinkedIn.
"""

from __future__ import annotations

import time
import random
import urllib.parse
from datetime import datetime
from threading import Lock
from typing import Optional
from urllib.parse import urlparse, urlunparse

import regex as re
from bs4 import BeautifulSoup
from bs4.element import Tag

from .. import Scraper, ScraperInput, Site
from ..exceptions import LinkedInException
from ...jobs import (
    JobPost,
    Location,
    JobResponse,
    JobType,
    Country,
    Compensation,
    DescriptionFormat,
)
from ..utils import (
    create_session,
    logger,
    extract_emails_from_text,
    get_enum_from_job_type,
    currency_parser,
    markdown_converter,
)


class LinkedInScraper(Scraper):
    base_url = "https://www.linkedin.com"
    delay = 3
    band_delay = 4
    jobs_per_page = 25
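    # Pacing: after each page fetch, scrape() sleeps a randomized
    # delay..delay + band_delay seconds (3-7 s) to stay under rate limits.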

    def __init__(self, proxy: Optional[str] = None):
        """
        Initializes LinkedInScraper with the LinkedIn job search url
        """
        super().__init__(Site(Site.LINKEDIN), proxy=proxy)
        self.scraper_input = None
        self.country = "worldwide"
        # Lookbehind pulls the external apply URL out of the `?url=...`
        # parameter embedded in the job page markup.
        self.job_url_direct_regex = re.compile(r'(?<=\?url=)[^"]+')

    def scrape(self, scraper_input: ScraperInput) -> JobResponse:
        """
        Scrapes LinkedIn for jobs with scraper_input criteria
        :param scraper_input: search criteria (term, location, filters, etc.)
        :return: job_response
        """
        self.scraper_input = scraper_input
        job_list: list[JobPost] = []
        seen_urls = set()
        url_lock = Lock()
        page = scraper_input.offset // 25 + 25 if scraper_input.offset else 0
        seconds_old = (
            scraper_input.hours_old * 3600 if scraper_input.hours_old else None
        )
        continue_search = (
            lambda: len(job_list) < scraper_input.results_wanted and page < 1000
        )
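        # The guest endpoint pages in steps of 25 via the `start` offset and
        # stops serving results around 1000, hence the page < 1000 guard above.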
        while continue_search():
            logger.info(f"LinkedIn search page: {page // 25 + 1}")
            session = create_session(is_tls=False, has_retry=True, delay=5)
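            # Guest-search filter params as used below: f_WT=2 -> remote,
            # f_JT -> job-type code, f_AL -> Easy Apply only, f_C -> company
            # ids, f_TPR=r{seconds} -> maximum posting age.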
            params = {
                "keywords": scraper_input.search_term,
                "location": scraper_input.location,
                "distance": scraper_input.distance,
                "f_WT": 2 if scraper_input.is_remote else None,
                "f_JT": (
                    self.job_type_code(scraper_input.job_type)
                    if scraper_input.job_type
                    else None
                ),
                "pageNum": 0,
                "start": page + scraper_input.offset,
                "f_AL": "true" if scraper_input.easy_apply else None,
                "f_C": (
                    ",".join(map(str, scraper_input.linkedin_company_ids))
                    if scraper_input.linkedin_company_ids
                    else None
                ),
            }
            if seconds_old is not None:
                params["f_TPR"] = f"r{seconds_old}"

            params = {k: v for k, v in params.items() if v is not None}
            try:
                response = session.get(
                    f"{self.base_url}/jobs-guest/jobs/api/seeMoreJobPostings/search?",
                    params=params,
                    allow_redirects=True,
                    proxies=self.proxy,
                    headers=self.headers,
                    timeout=10,
                )
                if response.status_code not in range(200, 400):
                    if response.status_code == 429:
                        err = "429 Response - Blocked by LinkedIn for too many requests"
                    else:
                        err = f"LinkedIn response status code {response.status_code}"
                        err += f" - {response.text}"
                    logger.error(err)
                    return JobResponse(jobs=job_list)
            except Exception as e:
                if "Proxy responded with" in str(e):
                    logger.error("LinkedIn: Bad proxy")
                else:
                    logger.error(f"LinkedIn: {str(e)}")
                return JobResponse(jobs=job_list)

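            # The endpoint returns an HTML fragment of job cards (not JSON);
            # an empty page means the result set is exhausted.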
            soup = BeautifulSoup(response.text, "html.parser")
            job_cards = soup.find_all("div", class_="base-search-card")
            if len(job_cards) == 0:
                return JobResponse(jobs=job_list)

            for job_card in job_cards:
                job_url = None
                href_tag = job_card.find("a", class_="base-card__full-link")
                if href_tag and "href" in href_tag.attrs:
                    href = href_tag.attrs["href"].split("?")[0]
                    job_id = href.split("-")[-1]
                    job_url = f"{self.base_url}/jobs/view/{job_id}"

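                # Dedupe listings that reappear across pages; the lock keeps
                # seen_urls consistent if this is ever called concurrently.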
                with url_lock:
                    if job_url in seen_urls:
                        continue
                    seen_urls.add(job_url)
                try:
                    fetch_desc = scraper_input.linkedin_fetch_description
                    job_post = self._process_job(job_card, job_url, fetch_desc)
                    if job_post:
                        job_list.append(job_post)
                    if not continue_search():
                        break
                except Exception as e:
                    raise LinkedInException(str(e))

            if continue_search():
                time.sleep(random.uniform(self.delay, self.delay + self.band_delay))
                page += self.jobs_per_page

        job_list = job_list[: scraper_input.results_wanted]
        return JobResponse(jobs=job_list)

    def _process_job(
        self, job_card: Tag, job_url: str, full_descr: bool
    ) -> Optional[JobPost]:
        salary_tag = job_card.find("span", class_="job-search-card__salary-info")

        compensation = None
        if salary_tag:
            # Salary renders as e.g. "$55,000.00 - $70,000.00"; guard against
            # single-value strings so indexing the range cannot raise.
            salary_text = salary_tag.get_text(separator=" ").strip()
            salary_values = [currency_parser(value) for value in salary_text.split("-")]
            if len(salary_values) == 2:
                salary_min, salary_max = salary_values
                # Crude currency detection: a leading "$" is treated as USD;
                # any other leading symbol is passed through as-is.
                currency = salary_text[0] if salary_text[0] != "$" else "USD"
                compensation = Compensation(
                    min_amount=int(salary_min),
                    max_amount=int(salary_max),
                    currency=currency,
                )

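        # Title, company, location, and post date all come straight off the
        # search-card markup ("sr-only" holds the accessible job title).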
        title_tag = job_card.find("span", class_="sr-only")
        title = title_tag.get_text(strip=True) if title_tag else "N/A"

        company_tag = job_card.find("h4", class_="base-search-card__subtitle")
        company_a_tag = company_tag.find("a") if company_tag else None
        company_url = (
            urlunparse(urlparse(company_a_tag.get("href"))._replace(query=""))
            if company_a_tag and company_a_tag.has_attr("href")
            else ""
        )
        company = company_a_tag.get_text(strip=True) if company_a_tag else "N/A"

        metadata_card = job_card.find("div", class_="base-search-card__metadata")
        location = self._get_location(metadata_card)

        datetime_tag = (
            metadata_card.find("time", class_="job-search-card__listdate")
            if metadata_card
            else None
        )
        date_posted = None
        if datetime_tag and "datetime" in datetime_tag.attrs:
            datetime_str = datetime_tag["datetime"]
            try:
                date_posted = datetime.strptime(datetime_str, "%Y-%m-%d")
            except ValueError:
                date_posted = None
        job_details = {}
        if full_descr:
            job_details = self._get_job_details(job_url)

        return JobPost(
            title=title,
            company_name=company,
            company_url=company_url,
            location=location,
            date_posted=date_posted,
            job_url=job_url,
            compensation=compensation,
            job_type=job_details.get("job_type"),
            description=job_details.get("description"),
            job_url_direct=job_details.get("job_url_direct"),
            emails=extract_emails_from_text(job_details.get("description")),
            logo_photo_url=job_details.get("logo_photo_url"),
        )

    def _get_job_details(self, job_page_url: str) -> dict:
        """
        Retrieves job description and other job details by going to the job page url
        :param job_page_url:
        :return: dict of description, job_type, job_url_direct, and logo_photo_url
        """
        try:
            session = create_session(is_tls=False, has_retry=True)
            response = session.get(
                job_page_url, headers=self.headers, timeout=5, proxies=self.proxy
            )
            response.raise_for_status()
        except Exception:
            return {}
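        # A redirect to /signup means LinkedIn is gating this job page behind
        # auth for the current client, so there is nothing to parse.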
        if response.url == "https://www.linkedin.com/signup":
            return {}

        soup = BeautifulSoup(response.text, "html.parser")
        div_content = soup.find(
            "div", class_=lambda x: x and "show-more-less-html__markup" in x
        )
        description = None
        if div_content is not None:

            def remove_attributes(tag):
                for attr in list(tag.attrs):
                    del tag[attr]
                return tag

            div_content = remove_attributes(div_content)
            description = div_content.prettify(formatter="html")
            if self.scraper_input.description_format == DescriptionFormat.MARKDOWN:
                description = markdown_converter(description)

        # Guard the logo lookup: not every job page carries this image tag.
        logo_image = soup.find("img", {"class": "artdeco-entity-image"})
        return {
            "description": description,
            "job_type": self._parse_job_type(soup),
            "job_url_direct": self._parse_job_url_direct(soup),
            "logo_photo_url": logo_image.get("data-delayed-url") if logo_image else None,
        }

    def _get_location(self, metadata_card: Optional[Tag]) -> Location:
        """
        Extracts the location data from the job metadata card.
        :param metadata_card:
        :return: location
        """
        location = Location(country=Country.from_string(self.country))
        if metadata_card is not None:
            location_tag = metadata_card.find(
                "span", class_="job-search-card__location"
            )
            location_string = location_tag.text.strip() if location_tag else "N/A"
            parts = location_string.split(", ")
            if len(parts) == 2:
                city, state = parts
                location = Location(
                    city=city,
                    state=state,
                    country=Country.from_string(self.country),
                )
            elif len(parts) == 3:
                city, state, country = parts
                country = Country.from_string(country)
                location = Location(city=city, state=state, country=country)
        return location

    @staticmethod
    def _parse_job_type(soup_job_type: BeautifulSoup) -> list[JobType] | None:
        """
        Gets the job type from job page
        :param soup_job_type:
        :return: list of JobType
        """
        h3_tag = soup_job_type.find(
            "h3",
            class_="description__job-criteria-subheader",
            string=lambda text: text and "Employment type" in text,
        )
        employment_type = None
        if h3_tag:
            employment_type_span = h3_tag.find_next_sibling(
                "span",
                class_="description__job-criteria-text description__job-criteria-text--criteria",
            )
            if employment_type_span:
                # Normalize e.g. "Full-time" -> "fulltime" before the enum lookup.
                employment_type = employment_type_span.get_text(strip=True)
                employment_type = employment_type.lower()
                employment_type = employment_type.replace("-", "")

        return [get_enum_from_job_type(employment_type)] if employment_type else []

    def _parse_job_url_direct(self, soup: BeautifulSoup) -> str | None:
        """
        Gets the job url direct from job page
        :param soup:
        :return: str
        """
        job_url_direct = None
        job_url_direct_content = soup.find("code", id="applyUrl")
        if job_url_direct_content:
            job_url_direct_match = self.job_url_direct_regex.search(
                job_url_direct_content.decode_contents().strip()
            )
            if job_url_direct_match:
                job_url_direct = urllib.parse.unquote(job_url_direct_match.group())

        return job_url_direct

    @staticmethod
    def job_type_code(job_type_enum: JobType) -> str:
        # Single-letter codes expected by LinkedIn's f_JT query filter.
        return {
            JobType.FULL_TIME: "F",
            JobType.PART_TIME: "P",
            JobType.INTERNSHIP: "I",
            JobType.CONTRACT: "C",
            JobType.TEMPORARY: "T",
        }.get(job_type_enum, "")

    headers = {
        "authority": "www.linkedin.com",
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "accept-language": "en-US,en;q=0.9",
        "cache-control": "max-age=0",
        "upgrade-insecure-requests": "1",
        "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
    }
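

# A minimal usage sketch (illustrative, not part of the module). It assumes
# ScraperInput accepts the fields read above (search_term, location,
# results_wanted, ...); the exact constructor signature is an assumption.
if __name__ == "__main__":
    scraper = LinkedInScraper()
    result = scraper.scrape(
        ScraperInput(  # field names beyond those used in scrape() are assumed
            site_type=[Site.LINKEDIN],
            search_term="software engineer",
            location="San Francisco, CA",
            results_wanted=10,
        )
    )
    for post in result.jobs:
        print(post.title, "|", post.company_name, "|", post.job_url)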