+ >
+ )
+ return (
+
+ )
+}
diff --git a/app/components/HelpGrid/helpgrid.css b/app/components/HelpGrid/helpgrid.css
new file mode 100644
index 00000000..bedb78bb
--- /dev/null
+++ b/app/components/HelpGrid/helpgrid.css
@@ -0,0 +1,11 @@
+.help-grid {
+ display: grid;
+ grid-template-columns: repeat(2, 1fr);
+ gap: var(--spacing-32);
+}
+
+@media (max-width: 780px) {
+ .help-grid {
+ grid-template-columns: 1fr;
+ }
+}
diff --git a/app/components/HelpGrid/index.tsx b/app/components/HelpGrid/index.tsx
new file mode 100644
index 00000000..f0e81975
--- /dev/null
+++ b/app/components/HelpGrid/index.tsx
@@ -0,0 +1,62 @@
+import './helpgrid.css'
+import Card from '../Card'
+
+const helpItems = [
+ {
+ title: 'Start a career in AI Safety',
+ description:
+ 'For both technical and non-technical roles in AI alignment, governance, and field-building',
+ impact: 'Highest direct impact',
+ icon: 'briefcase',
+ route: '/howcanihelppage/career',
+ },
+ {
+ title: 'Spread the word & grassroots activism',
+ description: 'For anyone—help us spread the word about this issue',
+ impact: 'Quickest & most accessible',
+ icon: 'megaphone',
+ route: '/howcanihelppage/grassroots',
+ },
+ {
+ title: 'Donate',
+ description:
+ 'The AI safety field is constrained by funding—financial help is critical at this moment',
+ impact: 'Highest indirect impact',
+ icon: 'piggy-bank',
+ route: '/howcanihelppage/donate',
+ },
+ {
+ title: 'Volunteer',
+ description:
+      'Help us build important AI safety infrastructure—all skill sets and levels of time commitment are needed',
+ impact: 'Best for partial commitment',
+ icon: 'hand-heart',
+ route: '/howcanihelppage/volunteer',
+ },
+ {
+ title: 'Build your knowledge',
+ description:
+ 'Learning about AI safety equips you to effectively contribute to discussions and influence its development',
+ impact: 'Enables impact',
+ icon: 'book',
+ route: '/howcanihelppage/knowledge',
+ },
+ {
+ title: 'Join a community',
+ description:
+ 'Joining a community is motivating, and will help guide your efforts to contribute and get involved',
+ impact: 'Enables impact',
+ icon: 'people',
+ route: '/howcanihelppage/community',
+ },
+]
+
+export default function HelpGrid() {
+  return (
+    <div className="help-grid">
+      {/* Card is assumed to accept the helpItems fields as props */}
+      {helpItems.map((item, index) => (
+        <Card key={index} {...item} />
+      ))}
+    </div>
+  )
+}
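For reference, here is a minimal sketch of the prop shape Card would need for the grid above to type-check. The interface name and fields are inferred from the helpItems entries and are assumptions, not part of this diff:

// Hypothetical CardProps, inferred from the helpItems entries above; the real
// definition lives in ../Card and is not shown in this diff.
interface CardProps {
  title: string
  description: string
  impact: string
  icon: string // a later commit in this PR swaps this to an icon component
  route: string
}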
diff --git a/app/components/HowCanIHelp/Career.tsx b/app/components/HowCanIHelp/Career.tsx
new file mode 100644
index 00000000..f34c3760
--- /dev/null
+++ b/app/components/HowCanIHelp/Career.tsx
@@ -0,0 +1,73 @@
+import CardSmall from '../CardSmall'
+import './career.css'
+
+export default function Career() {
+ return (
+
+
Start a career
+
There are 3 major career paths in AI safety:
+
+
+
+
+
+
+
AI alignment research
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior so
+ that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical challenge
+ of AI alignment through dedicated research, and then collaborate globally to carefully
+ deploy solutions. While experts believe alignment is solvable, it remains a complex
+ problem that demands significant high-quality intellectual talent.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like OpenAI.
+
+
+ *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
+ non-safety parts of the organization which improves the AI technology itself—and so ends
+ up causing harm.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+ )
+}
diff --git a/app/components/HelpGrid/index.tsx b/app/components/HelpGrid/index.tsx
index f0e81975..da7bebd2 100644
--- a/app/components/HelpGrid/index.tsx
+++ b/app/components/HelpGrid/index.tsx
@@ -1,5 +1,6 @@
import './helpgrid.css'
import Card from '../Card'
+import {Briefcase, Megaphone, PiggyBank, Hand, Book, People} from '../icons-generated'
const helpItems = [
{
@@ -7,14 +8,14 @@ const helpItems = [
description:
'For both technical and non-technical roles in AI alignment, governance, and field-building',
impact: 'Highest direct impact',
- icon: 'briefcase',
+ icon: Briefcase,
route: '/howcanihelppage/career',
},
{
title: 'Spread the word & grassroots activism',
description: 'For anyone—help us spread the word about this issue',
impact: 'Quickest & most accessible',
- icon: 'megaphone',
+ icon: Megaphone,
route: '/howcanihelppage/grassroots',
},
{
@@ -22,7 +23,7 @@ const helpItems = [
description:
'The AI safety field is constrained by funding—financial help is critical at this moment',
impact: 'Highest indirect impact',
- icon: 'piggy-bank',
+ icon: PiggyBank,
route: '/howcanihelppage/donate',
},
{
@@ -30,7 +31,7 @@ const helpItems = [
description:
      'Help us build important AI safety infrastructure—all skill sets and levels of time commitment are needed',
impact: 'Best for partial commitment',
- icon: 'hand-heart',
+ icon: Hand,
route: '/howcanihelppage/volunteer',
},
{
@@ -38,7 +39,7 @@ const helpItems = [
description:
'Learning about AI safety equips you to effectively contribute to discussions and influence its development',
impact: 'Enables impact',
- icon: 'book',
+ icon: Book,
route: '/howcanihelppage/knowledge',
},
{
@@ -46,7 +47,7 @@ const helpItems = [
description:
'Joining a community is motivating, and will help guide your efforts to contribute and get involved',
impact: 'Enables impact',
- icon: 'people',
+ icon: People,
route: '/howcanihelppage/community',
},
]
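With icons now passed as components instead of string names, Card can render them directly. A minimal sketch of what that call site might look like, assuming the generated icons accept a className prop; Card's actual internals are not shown in this diff:

import type {ComponentType} from 'react'

// Hypothetical sketch: rendering an icon prop that is now a component such as Briefcase.
// The prop shape and the "card-icon" class name are assumptions for illustration.
function CardIcon({icon: Icon}: {icon: ComponentType<{className?: string}>}) {
  return <Icon className="card-icon" />
}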
diff --git a/app/components/HowCanIHelp/Career.tsx b/app/components/HowCanIHelp/Career.tsx
index f34c3760..e2460672 100644
--- a/app/components/HowCanIHelp/Career.tsx
+++ b/app/components/HowCanIHelp/Career.tsx
@@ -1,71 +1,477 @@
+import Card from '../Card'
import CardSmall from '../CardSmall'
+import DropDown from '../DropDown'
+import {Microscope, GovermentBuilding, PuzzlePieces} from '../icons-generated'
+import LinkCard from '../LinkCard'
import './career.css'
export default function Career() {
return (
Start a career
-
There are 3 major career paths in AI safety:
+
There are 3 major career paths in AI safety:
-
-
AI alignment research
-
-
-
What
-
- AI alignment research is the field dedicated to ensuring that advanced artificial
- intelligence systems act in ways that are beneficial to humans and aligned with human
- values and goals. It involves developing methods and principles to guide AI behavior so
- that as these systems become more capable and autonomous, they continue to operate
- safely and ethically within the intentions set by their human creators.
-
+
+
AI alignment research
+
+
+
What
+
+ AI alignment research is the field dedicated to ensuring that advanced artificial
+ intelligence systems act in ways that are beneficial to humans and aligned with human
+ values and goals. It involves developing methods and principles to guide AI behavior
+ so that as these systems become more capable and autonomous, they continue to operate
+ safely and ethically within the intentions set by their human creators.
+
+
+
+
Why this is important
+
+ To ensure humanity benefits from advanced AI and mitigates risks—like unintended
+ behaviors or misalignment with human values—we must first solve the technical
+ challenge of AI alignment through dedicated research, and then collaborate globally to
+ carefully deploy solutions. While experts believe alignment is solvable, it remains a
+ complex problem that demands significant high-quality intellectual talent.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like
+ OpenAI.
+
+
+ *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
+ non-safety parts of the organization which improves the AI technology itself—and so
+ ends up causing harm.
+
+
+
+
You might be a good fit if...
+
+ You might be a good fit as an AI alignment researcher if you have a quantitative
+ background, you enjoy programming, or you're skilled at breaking down problems
+ logically, hypothesizing, and testing various solutions with high attention to detail.
+
+
-
-
Why this is important
-
- To ensure humanity benefits from advanced AI and mitigates risks—like unintended
- behaviors or misalignment with human values—we must first solve the technical challenge
- of AI alignment through dedicated research, and then collaborate globally to carefully
- deploy solutions. While experts believe alignment is solvable, it remains a complex
- problem that demands significant high-quality intellectual talent.
+
+
+
+
Interested in pursuing this career path?
+
+ Take the following steps to (1) learn more & further assess your fit; (2) learn how to
+ make the transition
-
-
Where AI alignment researchers work
-
- AI alignment researchers typically work at non-profit organizations dedicated to AI
- safety and alignment; in academia (i.e. universities and academic institutions);
- independently; or on industry safety teams*, usually at major AI companies like OpenAI.
-
-
- *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
- non-safety parts of the organization which improves the AI technology itself—and so ends
- up causing harm.
-
+
+
+
+ Read the 80,000 Hours Technical AI Safety Career Review
+
+
+
The review takes about one hour and addresses:
+
+
What this career path involves
+
How to predict your fit
+
The upsides and downsides of this career path
+
Compensation
+
How to enter or transition into this career
+
+
+
+
-
-
You might be a good fit if...
-
- You might be a good fit as an AI alignment researcher if you have a quantitative
- background, you enjoy programming, or you're skilled at breaking down problems
- logically, hypothesizing, and testing various solutions with high attention to detail.
-
+
+
+
+ Sign up for 1-on-1 career advice with AI Safety Quest & 80,000 Hours (free)
+
+
+ Schedule a 30-minute or 1-hour video call—we recommend booking both! These calls will
+ address your specific questions about the field, confirm your interest and fit, and
+ provide tailored recommendations to help you make the transition.
+
+
+ Note: 80,000 Hours does not accept all applicants.
+
+
+
+
+
+
+
+
+
+
+
+ A process note: Form your own understanding of the AI alignment technical challenge
+
+
+ AI safety is a relatively new field with diverse opinions on how best to solve the
+ technical challenge of AI alignment. Many unexplored avenues and important questions
+ likely remain unaddressed. Therefore, it's crucial for (aspiring) AI alignment
+ researchers to think independently and develop their own models on this topic. If you
+ pursue a career in this field, we recommend deeply educating yourself on the technical
+ challenge of alignment, engaging with other AI safety experts, and thinking critically
+ about the topic and current paradigms.
+
+
+
+
+
+
+
+
+
+
+
+
+
+ There are many roles that support the work of AI alignment researchers, and having
+            high-performing people in these roles is crucial. In a research organization,
+            around half of the staff will be doing other tasks essential for the organization
+            to perform at its best and have an impact. Some of these roles include:
+
+
+
+
+
+ Operations management at an AI safety research organization
+
+
+ This involves overseeing the day-to-day activities that enable the organization
+ to function efficiently and effectively. Responsibilities may include
+ administrative support, resource allocation, HR, management of facilities, IT
+ support, project coordination, etc.
+
+
+
+
+
+
+
+
+
+            Research management at an AI safety research organization
+
+
+ This involves overseeing and coordinating research activities to ensure they
+ align with the mission of promoting safe AI development. Responsibilities
+ include setting research priorities, managing teams, allocating resources,
+ fostering collaboration, monitoring progress, and upholding ethical standards.
+
+
+
+
+
+
+
+
+
+ Being an executive assistant to an AI safety researcher
+
+
+ This involves managing administrative tasks to enhance this person’s
+ productivity. Responsibilities include scheduling meetings, handling
+ correspondence, coordinating travel, organizing events, and ensuring they can
+ focus on impactful AI safety efforts.
+
+
+
+
+
+
+
+ }
+ />
+
+
+
AI governance & policy
+
+
+
What
+
+          AI governance & policy is the field focused on shaping the rules, institutions, and
+          norms that determine how advanced AI is developed and deployed. It involves crafting
+          and advocating for measures such as safety standards, oversight of large-scale AI
+          development, and international agreements that reduce the risks posed by increasingly
+          capable systems.
+
+
+
+
Why this is important
+
+          Even if the technical challenge of AI alignment is solved, humanity still needs to
+          coordinate on deploying solutions carefully and on discouraging reckless development.
+          Good governance buys time for alignment research to succeed and ensures its results
+          are actually used. Relatively few people work on the governance of advanced AI today,
+          so additional talent can have an outsized impact.
+
+
+
+
Where AI alignment researchers work
+
+ AI alignment researchers typically work at non-profit organizations dedicated to AI
+ safety and alignment; in academia (i.e. universities and academic institutions);
+ independently; or on industry safety teams*, usually at major AI companies like
+ OpenAI.
+
+
+
+
You might be a good fit if...
+
+          You might be a good fit in AI governance if you have a background in policy, law,
+          political science, or communications, you enjoy writing and synthesizing research,
+          and you're skilled at building consensus among stakeholders with differing interests.
+
+
+
+
+
+
AI safety field-building
+
+
+
What
+
+          AI safety field-building is work that grows and strengthens the community of people
+          addressing AI risk, rather than contributing to research or policy directly. It
+          includes outreach and advocacy, education and training programs, grantmaking, and
+          founding new organizations that fill gaps in the field.
+
+
+
+
Why this is important
+
+          The AI safety field is young and small relative to the scale of the problem it
+          addresses. Bringing in more talented people, funding, and institutional support
+          multiplies the impact of everyone working on alignment and governance, so effective
+          field-builders can often achieve more indirectly than through direct work alone.
+
+
+
+
You might be a good fit if...
+
+          You might be a good fit for field-building if you have strong interpersonal and
+          organizational skills, you communicate clearly, or you have experience in teaching,
+          community organizing, recruiting, marketing, or launching new projects.
+
+
+
+
+
+
Most common field-building roles
+
+
+
Communications & advocacy
+
+ Communications involves educating the public or spreading the word about AI
+ safety—most typically through websites or social media. People with computer skills
+ and creative skills can typically find a place within communications. More
+          specifically, roles could include independent content production, software
+          engineering, project management, or design.
+
+
+
+
+
+
+
+
+
Being a grantmaker
+
+ There are many philanthropists interested in donating millions of dollars to AI
+ safety—but there currently aren’t enough grantmakers able to vet funding proposals.
+ Because a randomly chosen proposal has little expected impact, grantmakers can have a
+ large impact by helping philanthropists distinguish promising projects in AI safety
+ from less promising ones.
+
+
+
+
+
+
+
+
+
Founding new projects
+
+ Founding a new project in AI safety involves identifying a gap in a pressing problem
+ area, formulating a solution, investigating it, and then helping to build an
+          organization by investing in strategy, hiring, management, culture, and so on—ideally
+ building something that can continue without you.
+
+
+
+
+
+
+
+
+
+
+
Supporting roles
+
+
+ There are many roles that support the work of AI alignment researchers, and
+                having high-performing people in these roles is crucial. In a research
+                organization, around half of the staff will be doing other tasks essential for
+                the organization to perform at its best and have an impact. Some of these roles
+                include:
+
+
+
+
+
+ Operations management at an AI safety research organization
+
+
+ This involves overseeing the day-to-day activities that enable the
+ organization to function efficiently and effectively. Responsibilities may
+ include administrative support, resource allocation, HR, management of
+ facilities, IT support, project coordination, etc.
+
+
+
+
+
+
+
+
+
+                Research management at an AI safety research organization
+
+
+ This involves overseeing and coordinating research activities to ensure they
+ align with the mission of promoting safe AI development. Responsibilities
+ include setting research priorities, managing teams, allocating resources,
+ fostering collaboration, monitoring progress, and upholding ethical standards.
+
+
+
+
+
+
+
+
+
+ Being an executive assistant to an AI safety researcher
+
+
+ This involves managing administrative tasks to enhance this person’s
+ productivity. Responsibilities include scheduling meetings, handling
+ correspondence, coordinating travel, organizing events, and ensuring they can
+ focus on impactful AI safety efforts.
+
+
+
+
+
+
+
+
+
Other technical roles
+
+
+
+ Working in information security to protect AI (or the results of key
+ experiments) from misuse, theft, or tampering
+
+
+                  Security work keeps model weights, algorithmic insights, and the results of
+                  key experiments from being stolen, leaked, or tampered with, helping keep
+                  dangerous capabilities out of the wrong hands.
+
+
+
+
+
+
+
+
+
+ Becoming an expert in AI hardware as a way of steering AI progress in safer
+ directions
+
+
+                  Deep expertise in AI chips and compute supply chains gives you leverage over
+                  how quickly and where frontier systems get built, which can be used to steer
+                  progress in safer directions.
+
+
+
+
+
+
+
+
+ }
+ />
+
+
+
+ Multiply your impact: Support your career pursuit
+
- Working in information security to protect AI (or the results of key
+ Working in information security to protect AI (or the results of key
experiments) from misuse, theft, or tampering
@@ -417,7 +417,7 @@ export default function Career() {
- Becoming an expert in AI hardware as a way of steering AI progress in safer
+ Becoming an expert in AI hardware as a way of steering AI progress in safer
directions
@@ -426,7 +426,7 @@ export default function Career() {
- The AI safety movement is still relatively new, and your actions could have significant
- impact. Here's what you can do:
+ The AI movement is still relatively new, and your actions could have significant impact.
+ Here's what you can do:
- The AI movement is still relatively new, and your actions could have significant impact.
- Here's what you can do:
+ The AI safety movement is still relatively new, and your actions could have significant
+ impact. Here's what you can do:
)
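The collapsible sections in Career.tsx are built with the DropDown component. Given the {title, content} props visible in the DropDown diff below, a minimal usage sketch might look like the following; the title string and content markup are assumptions for illustration:

import DropDown from '../DropDown'

// Hypothetical usage of DropDown, based on the {title, content} DropDownProps
// shown in the DropDown/index.tsx hunk below.
export function SupportingRolesExample() {
  return (
    <DropDown
      title="Supporting roles"
      content={<p>There are many roles that support the work of AI alignment researchers.</p>}
    />
  )
}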
diff --git a/app/components/DropDown/dropdown.css b/app/components/DropDown/dropdown.css
index 76560d8c..d058ec64 100644
--- a/app/components/DropDown/dropdown.css
+++ b/app/components/DropDown/dropdown.css
@@ -1,6 +1,6 @@
.drop-down {
- border-top: 1px solid var(--colors-teal-500);
- border-bottom: 1px solid var(--colors-teal-500);
+ border-top: 1px solid var(--colors-teal-200);
+ border-bottom: 1px solid var(--colors-teal-200);
}
.drop-down-header {
diff --git a/app/components/DropDown/index.tsx b/app/components/DropDown/index.tsx
index 01595987..cc5d42c0 100644
--- a/app/components/DropDown/index.tsx
+++ b/app/components/DropDown/index.tsx
@@ -15,8 +15,8 @@ export default function DropDown({title, content}: DropDownProps) {
diff --git a/app/components/HowCanIHelp/Career.tsx b/app/components/HowCanIHelp/Career.tsx
index 053fbd1b..ab5dbba5 100644
--- a/app/components/HowCanIHelp/Career.tsx
+++ b/app/components/HowCanIHelp/Career.tsx
@@ -3,6 +3,7 @@ import CardSmall from '../CardSmall'
import DropDown from '../DropDown'
import {Microscope, GovermentBuilding, PuzzlePieces} from '../icons-generated'
import LinkCard from '../LinkCard'
+import Testimonial from '../Testimonial'
import './howcanihelp.css'
export default function Career() {
@@ -64,10 +65,10 @@ export default function Career() {
independently; or on industry safety teams*, usually at major AI companies like
OpenAI.
-
- *Note: Beware of the risk of joining "safety" teams, as this work often leaks to
- non-safety parts of the organization which improves the AI technology itself—and so
- ends up causing harm.
+
+ *Note: Beware of the risk of joining "safety"
+ teams, as this work often leaks to non-safety parts of the organization which improves
+ the AI technology itself—and so ends up causing harm.
@@ -90,7 +91,7 @@ export default function Career() {
-
+
Read the 80,000 Hours Technical AI Safety Career Review
@@ -108,7 +109,7 @@ export default function Career() {
-
+
Sign up for 1-on-1 career advice with AI Safety Quest & 80,000 Hours (free)
@@ -128,7 +129,7 @@ export default function Career() {
-
+
A process note: Form your own understanding of the AI alignment technical challenge
@@ -255,6 +274,92 @@ export default function Career() {
+
+
+
Interested in pursuing this career path?
+
+ Take the following steps to (1) learn more & further assess your fit; (2) learn how to
+ make the transition
+
+
+
+
+
+ Read the 80,000 Hours Technical AI Safety Career Review
+
+
+
The review takes about one hour and addresses:
+
+
What this career path involves
+
How to predict your fit
+
The upsides and downsides of this career path
+
Compensation
+
How to enter or transition into this career
+
+
+
+
+
+
+
+
+ Sign up for 1-on-1 career advice with AI Safety Quest & 80,000 Hours (free)
+
+
+ Schedule a 30-minute or 1-hour video call—we recommend booking both! These calls will
+ address your specific questions about the field, confirm your interest and fit, and
+ provide tailored recommendations to help you make the transition.
+
+
+ Note: 80,000 Hours does not accept all applicants.
+
+
+
+
+
+
+
+
+
+
+
+ A process note: Form your own understanding of the AI alignment technical challenge
+
+
+ AI safety is a relatively new field with diverse opinions on how best to solve the
+ technical challenge of AI alignment. Many unexplored avenues and important questions
+ likely remain unaddressed. Therefore, it's crucial for (aspiring) AI alignment
+ researchers to think independently and develop their own models on this topic. If you
+ pursue a career in this field, we recommend deeply educating yourself on the technical
+ challenge of alignment, engaging with other AI safety experts, and thinking critically
+ about the topic and current paradigms.
+
+ Take the following steps to (1) learn more & further assess your fit; (2) learn how to
+ make the transition
+
+
+
+
+
+ Sign up for 1-on-1 career advice with AI Safety Quest & 80,000 Hours (free)
+
+
+ Schedule a 30-minute or 1-hour video call—we recommend booking both! These calls will
+ address your specific questions about the field, confirm your interest and fit, and
+ provide tailored recommendations to help you make the transition.
+
+
+ Note: 80,000 Hours does not accept all applicants.
+
+
+
+
+
+
+
+
+
+
+
+ A process note: Form your own understanding of the AI alignment technical challenge
+
+
+ AI safety is a relatively new field with diverse opinions on how best to solve the
+ technical challenge of AI alignment. Many unexplored avenues and important questions
+ likely remain unaddressed. Therefore, it's crucial for (aspiring) AI alignment
+ researchers to think independently and develop their own models on this topic. If you
+ pursue a career in this field, we recommend deeply educating yourself on the technical
+ challenge of alignment, engaging with other AI safety experts, and thinking critically
+ about the topic and current paradigms.
+