diff --git a/.gitignore b/.gitignore index d8fdba97c..ac91c0ca7 100644 --- a/.gitignore +++ b/.gitignore @@ -106,5 +106,12 @@ package-lock.json # translations are stored in the `i18n` via crowdin i18n + +# code-import +code/node_modules +code/package-lock.json +code/yarn.lock +code/pnpm-lock.yaml + # vscode configuration -.vscode \ No newline at end of file +.vscode diff --git a/.husky/.gitignore b/.husky/.gitignore new file mode 100644 index 000000000..31354ec13 --- /dev/null +++ b/.husky/.gitignore @@ -0,0 +1 @@ +_ diff --git a/.husky/pre-commit b/.husky/pre-commit new file mode 100755 index 000000000..2312dc587 --- /dev/null +++ b/.husky/pre-commit @@ -0,0 +1 @@ +npx lint-staged diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index edcee7aac..fdf3f949a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,6 +21,9 @@ transparent as possible, whether it's: - publicly displayed via the UI of [solana.com](https://solana.com) (located in a different repo) - content translations are supported via Crowdin +- code blocks must use code-import for file snippets (via filesystem) +- code file should be [tests](https://nodejs.org/api/test.html) and should add + code ranges instead of whole test file ## Style guidelines @@ -45,9 +48,7 @@ In particular: and save the person reviewing your PR some time. We recommend [Grammarly](https://grammarly.com/). In [your Grammarly dictionary](https://account.grammarly.com/customize), you may - wish to add Solana-specific words like `lamport`, `blockhash`, etc. For VScode - users, there is a - [VScode extension for Grammarly](https://marketplace.visualstudio.com/items?itemName=znck.grammarly). + wish to add Solana-specific words like `lamport`, `blockhash`, etc. - Use US English rather than British English. Grammarly will catch this for you. - Use 'onchain' (not on-chain, definitely not smart contract) when referring to onchain apps. 
This comes from the Solana Foundation style guide, and is @@ -273,6 +274,52 @@ For images, you can use the path starting with `/public` like this: > links will be automatically adjusted to function on the website. Including > making the images viewable and removing `.md` file extensions. +### Code Blocks + +In addition to standard markdown "fenced" code blocks (i.e. using triple +backticks), the developer content repo requires the use of code-import for file +snippets. This ensures that code examples are always up-to-date with the actual +source files. + +#### Using code-import + +To use code-import, follow these steps: + +Ensure your code file is a test file located in the appropriate directory within +the repo. Use the following syntax to import code snippets: + +```javascript file="/path/to/your/file.ts#L1-L10,#L15-L20" + +``` + +This will import lines 1-10 and 15-20 from the specified file. + +Always use code ranges instead of importing whole files. This helps keep +examples concise and focused. + +#### Code-import Rules + +- The file path must start with a forward slash (/). +- You can specify multiple line ranges, separated by commas. +- Line ranges should be in ascending order and not overlap. +- Invalid ranges (e.g., #L4-L3) are not allowed. +- Line numbers start at 1, so #L0 is invalid. +- Trailing commas in the range specification are not allowed. + +Example of a valid code-import: + +```javascript file="/code/cookbook/wallets/check-public-key.ts#L1-L2,#L3-L18" + +``` + +Example of an invalid code-import: + +```javascript file=/code/cookbook/wallets/check-public-key.ts#L1-L2,#L3-L19,#L1-L3 + +``` + +This is invalid because the ranges are not in ascending order and overlap. 
+ ### Table of contents When a content page is rendered on solana.com, a table of contents will be @@ -519,7 +566,7 @@ a list of available components content - [images](#images) - details about how to include images in a piece of content - [code blocks](#code-blocks) - additional functionality on top of standard - markdown code blocks + markdown code blocks, these support code file import from filesystem - [blockquote](#blockquote) - additional functionality on top of the standard HTML `blockquote` element - [Callout](#callout) - custom component used to render message to the reader in diff --git a/code/cookbook/wallets/check-public-key.ts b/code/cookbook/wallets/check-public-key.ts new file mode 100644 index 000000000..f5b4c8392 --- /dev/null +++ b/code/cookbook/wallets/check-public-key.ts @@ -0,0 +1,19 @@ +import { PublicKey } from "@solana/web3.js"; + +// Note that Keypair.generate() will always give a public key that is valid for users + +// Valid public key +const key = new PublicKey("5oNDL3swdJJF1g9DzJiZ4ynHXgszjAEpUkxVYejchzrY"); +// Lies on the ed25519 curve and is suitable for users +console.log(PublicKey.isOnCurve(key.toBytes())); + +// Valid public key +const offCurveAddress = new PublicKey( + "4BJXYkfvg37zEmBbsacZjeQDpTNx91KppxFJxRqrz48e", +); + +// Not on the ed25519 curve, therefore not suitable for users +console.log(PublicKey.isOnCurve(offCurveAddress.toBytes())); + +// Not a valid public key +const errorPubkey = new PublicKey("testPubkey"); diff --git a/code/package.json b/code/package.json new file mode 100644 index 000000000..4429cdec8 --- /dev/null +++ b/code/package.json @@ -0,0 +1,15 @@ +{ + "name": "code", + "version": "1.0.0", + "description": "", + "main": "index.js", + "scripts": { + "test": "echo \"Error: no test specified\" && exit 1" + }, + "keywords": [], + "author": "", + "license": "ISC", + "dependencies": { + "@solana/web3.js": "^1.95.2" + } +} diff --git a/coder.ts b/coder.ts new file mode 100644 index 000000000..ae53d4693 --- 
/dev/null +++ b/coder.ts @@ -0,0 +1,222 @@ +import { promises as fs } from "node:fs"; +import path from "node:path"; +import os from "node:os"; +import { unified } from "unified"; +import remarkParse from "remark-parse"; +import remarkStringify from "remark-stringify"; +import remarkFrontmatter from "remark-frontmatter"; +import { visit } from "unist-util-visit"; +import ignore, { type Ignore } from "ignore"; +import importCode from "./src/utils/code-import"; +import chokidar from "chokidar"; + +let debugMode = false; + +const debug = (...args: string[]) => { + if (debugMode) { + console.log("[DEBUG]", ...args); + } +}; + +const hasCodeComponentWithFileMeta = async ( + filePath: string, +): Promise => { + const content = await fs.readFile(filePath, "utf8"); + let hasMatch = false; + + const tree = unified().use(remarkParse).use(remarkFrontmatter).parse(content); + + visit(tree, "code", node => { + if (node.meta?.includes("file=")) { + hasMatch = true; + return false; // Stop visiting + } + }); + + return hasMatch; +}; + +const getIgnore = async (directory: string): Promise => { + const ig = ignore(); + + try { + const gitignoreContent = await fs.readFile( + path.join(directory, ".gitignore"), + "utf8", + ); + ig.add(gitignoreContent); + // ignore all dotfiles + ig.add([".*"]); + // ignore CONTRIBUTING.md because it mentions the code component example + ig.add("CONTRIBUTING.md"); + } catch (error) { + // If .gitignore doesn't exist, just continue without it + if ((error as NodeJS.ErrnoException).code !== "ENOENT") { + throw error; + } + } + + return ig; +}; + +const getMarkdownAndMDXFiles = async (directory: string): Promise => { + const ig = await getIgnore(directory); + + const walkDir = async (dir: string): Promise => { + const entries = await fs.readdir(dir, { withFileTypes: true }); + const files = await Promise.all( + entries.map(async entry => { + const res = path.resolve(dir, entry.name); + const relativePath = path.relative(directory, res); + + if 
(ig.ignores(relativePath) || entry.name === ".gitignore") { + debug(`Ignoring file: ${relativePath}`); + return []; + } + + if (entry.isDirectory()) { + return walkDir(res); + } + + if ( + entry.isFile() && + (entry.name.endsWith(".md") || entry.name.endsWith(".mdx")) + ) { + if (await hasCodeComponentWithFileMeta(res)) { + debug(`Found file with code component: ${relativePath}`); + return res; + } + debug( + `Skipping file (no code component with file meta): ${relativePath}`, + ); + } + + return []; + }), + ); + return files.flat(); + }; + + return walkDir(directory); +}; + +const processContent = async ( + content: string, + filePath: string, +): Promise => { + try { + const file = await unified() + .use(remarkParse) + .use(remarkFrontmatter) + .use(importCode, { + preserveTrailingNewline: false, + removeRedundantIndentations: true, + rootDir: process.cwd(), + }) + .use(remarkStringify, { + bullet: "-", + emphasis: "*", + fences: true, + listItemIndent: "one", + rule: "-", + ruleSpaces: false, + strong: "*", + tightDefinitions: true, + }) + .process(content); + return String(file); + } catch (error) { + if ((error as NodeJS.ErrnoException).code === "ENOENT") { + throw new Error( + `File not found: ${(error as NodeJS.ErrnoException).path}`, + ); + } + throw error; + } +}; + +const processFile = async (filePath: string): Promise => { + try { + if (!(await hasCodeComponentWithFileMeta(filePath))) { + debug(`Skipping ${filePath}: No code component with file meta found.`); + return; + } + + const originalContent = await fs.readFile(filePath, "utf8"); + const processedContent = await processContent(originalContent, filePath); + if (originalContent !== processedContent) { + await fs.writeFile(filePath, processedContent); + console.log(`Updated: ${filePath}`); + } else { + debug(`No changes needed for: ${filePath}`); + } + } catch (error) { + console.error(`Error processing ${filePath}: ${(error as Error).message}`); + } +}; + +const processInChunks = async ( + items: 
T[], + processItem: (item: T) => Promise, + chunkSize: number, +): Promise => { + for (let i = 0; i < items.length; i += chunkSize) { + const chunk = items.slice(i, i + chunkSize); + await Promise.all(chunk.map(processItem)); + } +}; + +const watchFiles = async (directory: string): Promise => { + const watcher = chokidar.watch(["**/*.md", "**/*.mdx"], { + ignored: [ + "**.**", + /(^|[\/\\])\../, + "**/node_modules/**", + "**/.git/**", + ".gitignore", + ], // ignore dotfiles, node_modules, .git, and .gitignore + persistent: true, + cwd: directory, + }); + + console.log("Watch mode started. Waiting for file changes..."); + + watcher + .on("add", filePath => processFile(path.join(directory, filePath))) + .on("change", filePath => processFile(path.join(directory, filePath))) + .on("unlink", filePath => console.log(`File ${filePath} has been removed`)); +}; + +const main = async (): Promise => { + const filePath = process.argv[2]; + const watchMode = + process.argv.includes("--watch") || process.argv.includes("-w"); + debugMode = process.argv.includes("--debug") || process.argv.includes("-d"); + + if (debugMode) { + console.log("Debug mode enabled"); + } + + if (filePath && !watchMode && !debugMode) { + // Process single file + const absolutePath = path.resolve(process.cwd(), filePath); + console.log(`Processing single file: ${absolutePath}`); + await processFile(absolutePath); + } else if (watchMode) { + // Watch mode + await watchFiles(process.cwd()); + } else { + // Process all files + const files = await getMarkdownAndMDXFiles(process.cwd()); + const chunkSize = Math.max(1, Math.ceil(files.length / os.cpus().length)); + + console.log(`Processing ${files.length} files...`); + await processInChunks(files, processFile, chunkSize); + } + + if (!watchMode) { + console.log("Sync process completed."); + } +}; + +main().catch(console.error); diff --git a/content/cookbook/wallets/check-publickey.md b/content/cookbook/wallets/check-publickey.md index a16eb4ac5..287f64c2e 
100644 --- a/content/cookbook/wallets/check-publickey.md +++ b/content/cookbook/wallets/check-publickey.md @@ -11,7 +11,7 @@ have a private key associated with them. You can check this by looking to see if the public key lies on the ed25519 curve. Only public keys that lie on the curve can be controlled by users with wallets. -```javascript file="check-public-key.ts" +```javascript file=/code/cookbook/wallets/check-public-key.ts#L1-L2,#L3-L19 import { PublicKey } from "@solana/web3.js"; // Note that Keypair.generate() will always give a public key that is valid for users diff --git a/content/courses/connecting-to-offchain-data/metadata.yml b/content/courses/connecting-to-offchain-data/metadata.yml index 2400160a0..90ac8ed84 100644 --- a/content/courses/connecting-to-offchain-data/metadata.yml +++ b/content/courses/connecting-to-offchain-data/metadata.yml @@ -5,6 +5,3 @@ lessons: - oracles - verifiable-randomness-functions priority: 20 -# Uses out of date repos -# TODO: Superteam to update -isHidden: true diff --git a/content/courses/connecting-to-offchain-data/oracles.md b/content/courses/connecting-to-offchain-data/oracles.md index 674ebd9dd..82ce77a90 100644 --- a/content/courses/connecting-to-offchain-data/oracles.md +++ b/content/courses/connecting-to-offchain-data/oracles.md @@ -1406,7 +1406,7 @@ the data feed account does not exist anymore, withdraw the user's escrowed funds. A potential solution to this challenge can be found -[in the Github repository on the `challenge-solution` branch](https://github.com/Unboxed-Software/michael-burry-escrow/tree/challenge-solution). +[in the Github repository on the `challenge-solution` branch](https://github.com/solana-developers/burry-escrow/tree/challenge-solution). 
diff --git a/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.md b/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.md index 3f4445c28..603a62b55 100644 --- a/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.md +++ b/content/courses/connecting-to-offchain-data/verifiable-randomness-functions.md @@ -12,9 +12,9 @@ description: "Use proper cryptographic randomness in your onchain programs." - Attempts at generating randomness within your program are likely to be guessable by users given there's no true randomness onchain. -- Verifiable Random Functions (VRFs) give developers the opportunity to +- Verifiable Random Functions (VRFs) allow developers to incorporate securely generated random numbers in their onchain programs. -- A VRF is a public-key pseudorandom function that provides proofs that its +- A VRF is a public-key pseudorandom function that proves its outputs were calculated correctly. - Switchboard offers a developer-friendly VRF for the Solana ecosystem. @@ -24,7 +24,7 @@ description: "Use proper cryptographic randomness in your onchain programs." Random numbers are **_not_** natively allowed onchain. This is because Solana is deterministic, every validator runs your code and needs to have the same result. -So if you wanted to create a raffle program, you'd have to look outside of the +So if you wanted to create a raffle program, you'd have to look outside the blockchain for your randomness. This is where Verifiable Random Functions (VRFs) come in. VRFs offer developers a secure means of integrating randomness onchain in a decentralized fashion. @@ -33,7 +33,7 @@ in a decentralized fashion. Before we dive into how random numbers can be generated for a blockchain, we must first understand how they are generated on traditional computer systems. -There are really two types of random numbers: _true random_ and _pseudorandom_. 
+There are two types of random numbers: _true random_ and _pseudorandom_. The difference between the two lies in how the numbers are generated. Computers can acquire _true random_ numbers by taking some type of physical @@ -50,8 +50,8 @@ called a seed and then use mathematical formulas to generate subsequent numbers in the sequence. Given the same seed, a PRNG will always produce the same sequence of numbers. It's important to seed with something close to true entropy: an admin-provided "random" input, the last system log, some combination -of your system's clock time and other factors, etc.. Fun fact: older video games -have been broken because speedrunners found out how their randomness was +of your system's clock time and other factors, etc.. Fun fact: some older video +games have been broken because speed runners found out how their randomness was calculated. One game in particular used the number of steps you've taken in the game as a seed. @@ -65,7 +65,7 @@ more. So we'll have to look outside of the blockchain for randomness with VRFs. ### What is Verifiable Randomness? A Verifiable Random Function (VRF) is a public-key pseudorandom function that -provides proofs that its outputs were calculated correctly. This means we can +provides proof that its outputs were calculated correctly. This means we can use a cryptographic keypair to generate a random number with a proof, which can then be validated by anyone to ensure the value was calculated correctly without the possibility of leaking the producer's secret key. Once validated, the random @@ -73,13 +73,13 @@ value is stored onchain in an account. VRFs are a crucial component for achieving verifiable and unpredictable randomness on a blockchain, addressing some of the shortcomings of traditional -PRNGs and the challenges with achieving true randomness in a decentralized +PRNGs and the challenges of achieving true randomness in a decentralized system. There are three key properties of a VRF: 1. 
**Deterministic** - A VRF takes a secret key and a nonce as inputs and - deterministically produces an output ( seeding ). The result is a seemingly + deterministically produces an output (seeding). The result is a seemingly random value. Given the same secret key and nonce, the VRF will always produce the same output. This property ensures that the random value can be reproduced and verified by anyone. @@ -91,7 +91,7 @@ There are three key properties of a VRF: generated by a VRF using the corresponding secret key and nonce. VRFs are not specific to Solana and have been utilized on other blockchains to -generate pseudorandom numbers. Fortunately switchboard offers their +generate pseudorandom numbers. Fortunately, switchboard offers their implementation of VRF to Solana. ### Switchboard VRF Implementation @@ -135,15 +135,15 @@ and consuming randomness from Switchboard looks like this: 6. Once VRF proof is verified, the Switchboard program will invoke the `callback` that was passed in as the callback in the initial request with the pseudorandom number returned from the Oracle. -7. Program consumes the random number and can execute business logic with it! +7. The program consumes the random number and can execute business logic with it! There are a lot of steps here, but don't worry, we'll be going through each step of the process in detail. -First there are a couple of accounts that we will have to create ourselves to +First, there are a couple of accounts that we will have to create to request randomness, specifically the `authority` and `vrf` accounts. The -`authority` account is a PDA derived from our program that is requesting the -randomness. So the PDA we create will have our own seeds for our own needs. For +`authority` account is a PDA derived from our program that is requesting +randomness. So the PDA we create will have our seeds for our needs. For now, we'll simply set them at `VRFAUTH`. 
```typescript @@ -167,9 +167,9 @@ pub struct VrfAccountData { pub counter: u128, /// Onchain account delegated for making account changes. <-- This is our PDA pub authority: Pubkey, - /// The OracleQueueAccountData that is assigned to fulfill VRF update request. + /// The OracleQueueAccountData that is assigned to fulfill the VRF update request. pub oracle_queue: Pubkey, - /// The token account used to hold funds for VRF update request. + /// The token account used to hold funds for VRF update requests. pub escrow: Pubkey, /// The callback that is invoked when an update request is successfully verified. pub callback: CallbackZC, @@ -180,7 +180,7 @@ pub struct VrfAccountData { /// The number of builders. pub builders_len: u32, pub test_mode: bool, - /// Oracle results from the current round of update request that has not been accepted as valid yet + /// Oracle results from the current round of update requests that have not been accepted as valid yet pub current_round: VrfRound, /// Reserved for future info. pub _ebuf: [u8; 1024], @@ -188,15 +188,15 @@ pub struct VrfAccountData { ``` Some important fields on this account are `authority`, `oracle_queue`, and -`callback`. The `authority` should be a PDA of the program that has the ability -to request randomness on this `vrf` account. That way, only that program can +`callback`. The `authority` should be a PDA of the program that can + request randomness on this `vrf` account. That way, only that program can provide the signature needed for the vrf request. The `oracle_queue` field -allows you to specify which specific oracle queue you'd like to service the vrf -requests made with this account. If you aren't familiar with oracle queues on -Switchboard, checkout the +allows you to specify which specific oracle queue you’d like to service the vrf +requests made with this account. 
If you aren’t familiar with oracle queues on +Switchboard, check the [Oracles lesson in the Connecting to Offchain Data course](/content/courses/connecting-to-offchain-data/oracles)! Lastly, the `callback` field is where you define the callback instruction the -Switchboard program should invoke once the randomness result has be verified. +Switchboard program should invoke once the randomness result has been verified. The `callback` field is of type `[CallbackZC](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/ecvrf.rs#L25)`. @@ -212,9 +212,9 @@ pub struct CallbackZC { /// The number of accounts used in the callback pub accounts_len: u32, /// The serialized instruction data. - pub ix_data: [u8; 1024], + pub instruction_data: [u8; 1024], /// The number of serialized bytes in the instruction data. - pub ix_data_len: u32, + pub instruction_data_len: u32, } ``` @@ -234,8 +234,8 @@ const vrfCallback: Callback = { { pubkey: vrfClientKey, isSigner: false, isWritable: true }, { pubkey: vrfSecret.publicKey, isSigner: false, isWritable: true }, ], - // use name of instruction - ixData: vrfIxCoder.encode("consumeRandomness", ""), // pass any params for instruction here + // use the name of instruction + instructionData: vrfInstructionCoder.encode("consumeRandomness", ""), // pass any params for instruction here } ``` @@ -300,14 +300,13 @@ That's a lot of accounts, let's walk through each one and give them some context. 
- `authority` - PDA derived from our program -- `vrf` - - [Account owned by the Switchboard program](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/vrf/struct.VrfAccountData.html) +- `vrf` - Account owned by the Switchboard program - Oracle Queue - [Account owned by Switchboard program that contains metadata about the oracle queue to use for this request](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/queue/struct.OracleQueueAccountData.html) - Queue Authority - Authority of the Oracle Queue chosen - [Data Buffer](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/queue.rs#L57C165-L57C165) - Account of the `OracleQueueBuffer` account holding a collection of Oracle - pubkeys that have successfully hearbeated before the queues `oracleTimeout` + pubkeys that have successfully heartbeat before the queues `oracleTimeout` configuration has elapsed. Stored in the Oracle Queue account. - [Permission Account Data](https://docs.rs/switchboard-solana/latest/switchboard_solana/oracle_program/accounts/permission/struct.PermissionAccountData.html) - Escrow (Switchboard escrow account) - Token Account @@ -323,41 +322,41 @@ context. That's all the accounts needed for just the randomness request, now let's see what it looks like in a Solana program via CPI. 
To do this, we make use of the `VrfRequestRandomness` data struct from the -[SwitchboardV2 rust crate.](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs) +[Switchboard-Solana rust crate.](https://github.com/switchboard-xyz/solana-sdk/blob/main/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs) This struct has some built-in capabilities to make our lives easier here, most -notably the account structure is defined for us and we can easily call `invoke` +notably, the account structure is defined for us and we can easily call `invoke` or `invoke_signed` on the object. ```rust // our client program -use switchboard_v2::VrfRequestRandomness; +use switchboard_solana::VrfRequestRandomness; use state::*; pub fn request_randomness(ctx: Context, request_params: RequestRandomnessParams) -> Result <()> { - let switchboard_program = ctx.accounts.switchboard_program.to_account_info(); - - let vrf_request_randomness = VrfRequestRandomness { - authority: ctx.accounts.vrf_state.to_account_info(), - vrf: ctx.accounts.vrf.to_account_info(), - oracle_queue: ctx.accounts.oracle_queue.to_account_info(), - queue_authority: ctx.accounts.queue_authority.to_account_info(), - data_buffer: ctx.accounts.data_buffer.to_account_info(), - permission: ctx.accounts.permission.to_account_info(), - escrow: ctx.accounts.switchboard_escrow.clone(), - payer_wallet: ctx.accounts.payer_wallet.clone(), - payer_authority: ctx.accounts.user.to_account_info(), - recent_blockhashes: ctx.accounts.recent_blockhashes.to_account_info(), - program_state: ctx.accounts.program_state.to_account_info(), - token_program: ctx.accounts.token_program.to_account_info(), - }; - - msg!("requesting randomness"); - vrf_request_randomness.invoke_signed( - switchboard_program, - request_params.switchboard_state_bump, - request_params.permission_bump, - state_seeds, - )?; + let switchboard_program = 
ctx.accounts.switchboard_program.to_account_info(); + + let vrf_request_randomness = VrfRequestRandomness { + authority: ctx.accounts.vrf_state.to_account_info(), + vrf: ctx.accounts.vrf.to_account_info(), + oracle_queue: ctx.accounts.oracle_queue.to_account_info(), + queue_authority: ctx.accounts.queue_authority.to_account_info(), + data_buffer: ctx.accounts.data_buffer.to_account_info(), + permission: ctx.accounts.permission.to_account_info(), + escrow: ctx.accounts.switchboard_escrow.clone(), + payer_wallet: ctx.accounts.payer_wallet.clone(), + payer_authority: ctx.accounts.user.to_account_info(), + recent_blockhashes: ctx.accounts.recent_blockhashes.to_account_info(), + program_state: ctx.accounts.program_state.to_account_info(), + token_program: ctx.accounts.token_program.to_account_info(), + }; + + msg!("requesting randomness"); + vrf_request_randomness.invoke_signed( + switchboard_program, + request_params.switchboard_state_bump, + request_params.permission_bump, + state_seeds, + )?; ... @@ -374,7 +373,7 @@ it back to the Switchboard program. Once the result is verified, the Switchboard program then invokes the `callback` instruction defined in the `vrf` account. The callback instruction is where you would have written your business logic using the random numbers. In the -following code we store the resulting randomness in our `vrf_auth` PDA from our +following code, we store the resulting randomness in our `vrf_auth` PDA from our first step. ```rust @@ -384,7 +383,7 @@ first step. 
pub struct ConsumeRandomness<'info> { // vrf client state #[account] - pub vrf_auth: AccountLoader<'info, VrfClientState>, + pub vrf_auth: AccountLoader<'info, VrfClient>, // switchboard vrf account #[account( mut, @@ -396,32 +395,32 @@ pub struct ConsumeRandomness<'info> { pub fn handler(ctx: Context) -> Result <()> { msg!("Consuming randomness!"); - // load the vrf account data + // load the vrf account data let vrf = ctx.accounts.vrf.load()?; - // use the get_result method to fetch the randomness results + // use the get_result method to fetch the randomness results let result_buffer = vrf.get_result()?; - // check if result buff is all 0's + // check if result buff is all 0's if result_buffer == [0u8; 32] { msg!("vrf buffer empty"); return Ok(()); } msg!("Result buffer is {:?}", result_buffer); - // use the random value how you see fit + // use the random value how you see fit Ok(()) } ``` -Now you have randomness! Hooray! But there is one last thing we have not talked -about yet and that's how the randomness is returned. Switchboard, gives you your +Now you have randomness! Hooray! But there is one last thing we have not discussed +yet and that's how the randomness is returned. Switchboard, gives you your randomness calling -`[get_result()](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/vrf.rs#L122)`. +[`get_result()`](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/vrf.rs#L122). 
This method returns the `current_round.result` field of the `vrf` account -SwitchboardDecimal format, which is really just a buffer of 32 random +SwitchboardDecimal format, which is just a buffer of 32 random `[u8](https://github.com/switchboard-xyz/solana-sdk/blob/9dc3df8a5abe261e23d46d14f9e80a7032bb346c/rust/switchboard-solana/src/oracle_program/accounts/ecvrf.rs#L65C26-L65C26)` -unsigned-integers. You can use these unsigned-integers however you see fit in +unsigned integers. You can use these unsigned integers however you see fit in your program, but a very common method is to treat each integer in the buffer as its own random number. For example, if you need a dice roll (1-6) just take the first byte of the array, module it with 6 and add one. @@ -448,27 +447,25 @@ lesson's codebase. If you don't want to complete the Oracle lesson, the starter code for this lab is provided for you in -[the main branch of the lab Github repository](https://github.com/Unboxed-Software/michael-burry-escrow). +[the main branch of the lab Github repository](https://github.com/solana-developers/burry-escrow). -The repo contains a "Michael Burry" escrow program. This is a program that -allows a user to lock up some solana funds in escrow that cannot be withdrawn +The repo contains a "Michael Burry" escrow program. This program +allows a user to lock up some SOL funds in escrow that cannot be withdrawn until SOL has reached a predefined price in USD chosen by the user. We will be adding VRF functionality to this program to allow the user to "Get out of jail" by rolling doubles. Our demo today will allow the user to roll two virtual dice, if they roll doubles (the two dice match), the user can withdraw their funds from escrow regardless of the SOL price. -#### 1. Program Setup +### 1. Program Setup If you are cloning the repo from the previous lesson make sure to do the following: -1. 
`git clone https://github.com/Unboxed-Software/michael-burry-escrow` -2. `cd michael-burry-escrow` +1. `git clone https://github.com/solana-developers/burry-escrow` +2. `cd burry-escrow` 3. `anchor build` -4. `anchor keys list` - 1. Take the resulting key and put it into `Anchor.toml` and - `programs/burry-escrow/src/lib.rs` +4. `anchor keys sync` 5. `solana config get` 1. Take your **Keypair Path** and change the `wallet` field in your `Anchor.toml` @@ -478,7 +476,7 @@ following: When all tests pass we're ready to begin. We will start by filling in some boilerplate stuff, then we'll implement the functions. -#### 2. Cargo.toml +### 2. Cargo.toml First, since VRF uses SPL tokens for their fees we need to import `anchor-spl` in our `Cargo.toml` file. @@ -487,10 +485,10 @@ in our `Cargo.toml` file. [dependencies] anchor-lang = "0.28.0" anchor-spl = "0.28.0" -switchboard-v2 = "0.4.0" +switchboard-solana = "0.28.0" ``` -#### 3. Lib.rs +### 3. Lib.rs Next, let's edit `lib.rs` and add the additional functions we'll be building today. The functions are as follows: @@ -523,8 +521,8 @@ mod burry_escrow { use super::*; - pub fn deposit(ctx: Context, escrow_amt: u64, unlock_price: f64) -> Result<()> { - deposit_handler(ctx, escrow_amt, unlock_price) + pub fn deposit(ctx: Context, escrow_amount: u64, unlock_price: u64) -> Result<()> { + deposit_handler(ctx, escrow_amount, unlock_price) } pub fn withdraw(ctx: Context) -> Result<()> { @@ -535,7 +533,7 @@ mod burry_escrow { init_vrf_client_handler(ctx) } - pub fn get_out_of_jail(ctx: Context, params: RequestRandomnessParams) -> Result<()>{ + pub fn get_out_of_jail(ctx: Context, params: RequestRandomnessParams) -> Result<()>{ get_out_of_jail_handler(ctx, params) } @@ -547,24 +545,24 @@ mod burry_escrow { Make sure you replace `YOUR_KEY_HERE` with your own program key. -#### 4. State.rs +### 4. State.rs -Next, in `state.rs`, add an `out_of_jail` flag to `EscrowState`. When we finally -roll two matching die, we'll flip this flag. 
When the `withdraw` function is -called we can transfer the funds without checking the price. +Next, in `state.rs`, add an `out_of_jail` flag to `Escrow`. When we finally roll +two matching die, we'll flip this flag. When the `withdraw` function is called +we can transfer the funds without checking the price. ```rust // state.rs #[account] -pub struct EscrowState { - pub unlock_price: f64, +pub struct Escrow { + pub unlock_price: u64, pub escrow_amount: u64, pub out_of_jail: bool } ``` -Then, create our second data account for this program: `VrfClientState`. This -will hold the state of our dice rolls. It will have the following fields: +Then, create our second data account for this program: `VrfClient`. This will +hold the state of our dice rolls. It will have the following fields: - `bump` - Stores the bump of the account for easy signing later. - `result_buffer` - This is where the VRF function will dump the raw randomness @@ -573,10 +571,10 @@ will hold the state of our dice rolls. It will have the following fields: - `die_result_1` and `die_result_2` - The results of our dice roll. - `timestamp` - Keeps track of when our last roll was. - `vrf` - Public key of the VRF account; owned by the Switchboard program. We - will create this before we call `VrfClientState`'s initialization function. + will create this before we call `VrfClient`'s initialization function. - `escrow` - Public key of our burry escrow account. -We're also going to make the `VrfClientState` context a `zero_copy` struct. This +We will also make the `VrfClient` context a `zero_copy` struct. This means that we will initialize it with `load_init()` and pass it into accounts with `AccountLoader`. We do this because VRF functions are very account intensive and we need to be mindful of the stack. 
If you'd like to learn more @@ -589,10 +587,10 @@ about `zero_copy`, take a look at our #[repr(packed)] #[account(zero_copy(unsafe))] #[derive(Default)] -pub struct VrfClientState { +pub struct VrfClient { pub bump: u8, pub result_buffer: [u8; 32], - pub dice_type: u8, // 6 sided + pub dice_type: u8, // 6 sided pub die_result_1: u8, pub die_result_2: u8, pub timestamp: i64, @@ -601,7 +599,7 @@ pub struct VrfClientState { } ``` -Lastly we are going to add the `VRF_STATE_SEED` to PDA our VRF Client account. +Lastly, we are going to add the `VRF_STATE_SEED` to the PDA of our VRF Client account. ```rust pub const VRF_STATE_SEED: &[u8] = b"VRFCLIENT"; @@ -617,8 +615,8 @@ pub const VRF_STATE_SEED: &[u8] = b"VRFCLIENT"; pub const SOL_USDC_FEED: &str = "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR"; #[account] -pub struct EscrowState { - pub unlock_price: f64, +pub struct Escrow { + pub unlock_price: u64, pub escrow_amount: u64, pub out_of_jail: bool } @@ -626,10 +624,10 @@ pub struct EscrowState { #[repr(packed)] #[account(zero_copy(unsafe))] #[derive(Default)] -pub struct VrfClientState { +pub struct VrfClient { pub bump: u8, pub result_buffer: [u8; 32], - pub dice_type: u8, // 6 sided + pub dice_type: u8, // 6 sided pub die_result_1: u8, pub die_result_2: u8, pub timestamp: i64, @@ -638,7 +636,7 @@ pub struct VrfClientState { } ``` -#### 5. Errors.rs +### 5. Errors.rs Next, let's take a quick pit stop and add one last error `InvalidVrfAuthorityError` to `errors.rs`. We'll use this when the VRF authority @@ -663,7 +661,7 @@ pub enum EscrowErrorCode { } ``` -#### 6. Mod.rs +### 6. Mod.rs Now, let's modify our `mod.rs` file to include our new functions we'll be writing. @@ -676,7 +674,7 @@ pub mod get_out_of_jail; pub mod consume_randomness; ``` -#### 7. Deposit.rs and Withdraw.rs +### 7. Deposit.rs and Withdraw.rs Lastly, let's update our `deposit.rs` and `withdraw.rs` files to reflect our soon-to-be new powers. 
@@ -686,30 +684,30 @@ First, let's initialize our `out_of_jail` flag to `false` in `deposit.rs`.

```rust
// in deposit.rs
...
-let escrow_state = &mut ctx.accounts.escrow_account;
-    escrow_state.unlock_price = unlock_price;
-    escrow_state.escrow_amount = escrow_amount;
-    escrow_state.out_of_jail = false;
+let escrow = &mut ctx.accounts.escrow_account;
+    escrow.unlock_price = unlock_price;
+    escrow.escrow_amount = escrow_amount;
+    escrow.out_of_jail = false;
...
```

Next, let's write our simple get-out-of-jail logic. Wrap our oracle price-checks
-with an `if` statement. If the `out_of_jail` flag on the `escrow_state` account
-is false, then we check the price at which to unlock the SOL:
+with an `if` statement. If the `out_of_jail` flag on the `escrow` account is
+false, then we check the price at which to unlock the SOL:

```rust
-if !escrow_state.out_of_jail {
+if !escrow.out_of_jail {
    // get result
    let val: f64 = feed.get_result()?.try_into()?;

    // check whether the feed has been updated in the last 300 seconds
    feed.check_staleness(Clock::get().unwrap().unix_timestamp, 300)
        .map_err(|_| error!(EscrowErrorCode::StaleFeed))?;

    msg!("Current feed result is {}!", val);
-    msg!("Unlock price is {}", escrow_state.unlock_price);
+    msg!("Unlock price is {}", escrow.unlock_price);

-    if val < escrow_state.unlock_price as f64 {
+    if val < escrow.unlock_price as f64 {
        return Err(EscrowErrorCode::SolPriceAboveUnlockPrice.into())
    }
}
@@ -718,7 +716,7 @@ if !escrow_state.out_of_jail {

If `out_of_jail` is true, then we get out of jail free and can skip the price
check, going straight to our withdrawal.

-#### 8. Using VRF
+### 8. Using VRF

Now that we have the boilerplate out of the way, let's move on to our first
addition: initializing our VRF Client. Let's create a new file called
@@ -730,8 +728,8 @@ the following accounts:

- `user` - the signer who has funds in escrow.
- `escrow_account` - the burry escrow account created when the user locked their funds up. -- `vrf_client_state` - account we will be creating in this instruction to hold - state about the user's dice rolls. +- `vrf_client` - account we will be creating in this instruction to hold state + about the user’s dice rolls. - `vrf` - Our VRF owned by the Switchboard program, we will create this account client-side before we call `init_vrf_client`. - `system_program` - The system program since we use the init macro for @@ -741,7 +739,9 @@ the following accounts: use crate::state::*; use crate::errors::*; use anchor_lang::prelude::*; -use switchboard_v2::VrfAccountData; +use switchboard_solana::VrfAccountData; + +pub const ANCHOR_DISCRIMINATOR: usize = 8; #[derive(Accounts)] pub struct InitVrfClient<'info> { @@ -753,21 +753,22 @@ pub struct InitVrfClient<'info> { seeds = [ESCROW_SEED, user.key().as_ref()], bump, )] - pub escrow_account: Account<'info, EscrowState>, - // vrf client state + pub escrow_account: Account<'info, Escrow>, + // vrf client + #[derive(InitSpace)] #[account( init, seeds = [ - VRF_STATE_SEED, + VRF_STATE_SEED, user.key.as_ref(), escrow_account.key().as_ref(), vrf.key().as_ref(), ], payer = user, - space = 8 + std::mem::size_of::(), + space = VrfClient::INIT_SPACE + ANCHOR_DISCRIMINATOR, bump )] - pub vrf_state: AccountLoader<'info, VrfClientState>, + pub vrf_state: AccountLoader<'info, VrfClient>, // switchboard vrf account #[account( @@ -782,11 +783,11 @@ pub struct InitVrfClient<'info> { Notice the `vrf_state` account is a PDA derived with the `VRF_STATE_SEED` string and the `user`, `escrow_account`, and `vrf` public keys as seeds. This means a single user can only initialize a single `vrf_state` account, just like they can -only have one `escrow_account`. Since there is only one, If you wanted to be +only have one `escrow_account`. 
Since there is only one, If you want to be thorough, you might want to implement a `close_vrf_state` function to get your rent back. -Now, let's write some basic initialization logic for this function. First we +Now, let’s write some basic initialization logic for this function. First, we load and initialize our `vrf_state` account by calling `load_init()`. Then we fill in the values for each field. @@ -795,7 +796,7 @@ pub fn init_vrf_client_handler(ctx: Context) -> Result<()> { msg!("init_client validate"); let mut vrf_state = ctx.accounts.vrf_state.load_init()?; - *vrf_state = VrfClientState::default(); + *vrf_state = VrfClient::default(); vrf_state.bump = ctx.bumps.get("vrf_state").unwrap().clone(); vrf_state.escrow = ctx.accounts.escrow_account.key(); vrf_state.die_result_1 = 0; @@ -807,9 +808,9 @@ pub fn init_vrf_client_handler(ctx: Context) -> Result<()> { } ``` -#### 9. Get Out of Jail +### 9. Get Out of Jail -Now that we have the `VrfClientState` account initialized, we can use it in the +Now that we have the `VrfClient` account initialized, we can use it in the `get_out_jail` instruction. Create a new file called `get_out_of_jail.rs` in the `/instructions` folder. @@ -821,10 +822,10 @@ VRF Accounts: - `payer_wallet` - the token wallet that will pay for the VRF request; the `user` must be the owner of this account. -- `vrf` - The VRF account that was created by the client. -- `oracle_queue` - The oracle queue that will field the randomness result. +- `vrf` - The VRF account created by the client. +- `oracle_queue` - The oracle queue that will feed the randomness result. - `queue_authority` - The authority over the queue. -- `data_buffer` - The queue's data buffer account - used by the queue to +- `data_buffer` - The queue's data buffer account which is used by the queue to compute/verify the randomness. - `permission` - Created when creating the `vrf` account. It's derived from several of the other accounts. 
@@ -850,7 +851,7 @@ use crate::state::*; use crate::errors::*; use anchor_lang::prelude::*; use anchor_lang::solana_program::sysvar::*; -use switchboard_v2::{VrfAccountData, OracleQueueAccountData, PermissionAccountData, SbState, VrfRequestRandomness}; +use switchboard_solana::{VrfAccountData, OracleQueueAccountData, PermissionAccountData, SbState, VrfRequestRandomness}; use anchor_spl::token::{TokenAccount, Token}; #[derive(Accounts)] @@ -870,7 +871,7 @@ pub struct RequestRandomness<'info> { seeds = [ESCROW_SEED, user.key().as_ref()], bump, )] - pub escrow_account: Account<'info, EscrowState>, + pub escrow_account: Account<'info, Escrow>, // vrf client state #[account( mut, @@ -882,7 +883,7 @@ pub struct RequestRandomness<'info> { ], bump )] - pub vrf_state: AccountLoader<'info, VrfClientState>, + pub vrf_state: AccountLoader<'info, VrfClient>, // switchboard vrf account #[account( mut, @@ -937,10 +938,10 @@ pub struct RequestRandomnessParams { } ``` -Now, we can work on the logic of this instruction. The logic should gather all -of the accounts needed and pass them to +Now, we can focus on implementing the logic of this instruction. The logic should collect all +the necessary accounts needed and pass them to `[VrfRequestRandomness](https://github.com/switchboard-xyz/solana-sdk/blob/fbef37e4a78cbd8b8b6346fcb96af1e20204b861/rust/switchboard-solana/src/oracle_program/instructions/vrf_request_randomness.rs#L8)`, -which is a really nice struct from Switchboard. Then we'll sign the request and +which is a well designed struct from Switchboard. After that we'll sign the request and send it on it's way. 
```rust @@ -951,7 +952,7 @@ pub fn get_out_of_jail_handler(ctx: Context, params: RequestR let bump = vrf_state.bump.clone(); drop(vrf_state); - // build vrf request struct from the Switchboard Rust crate + // build vrf request struct from the Switchboard Rust crate let vrf_request_randomness = VrfRequestRandomness { authority: ctx.accounts.vrf_state.to_account_info(), vrf: ctx.accounts.vrf.to_account_info(), @@ -971,7 +972,7 @@ pub fn get_out_of_jail_handler(ctx: Context, params: RequestR let escrow_key = ctx.accounts.escrow_account.key(); let user_key = ctx.accounts.user.key(); let state_seeds: &[&[&[u8]]] = &[&[ - &VRF_STATE_SEED, + &VRF_STATE_SEED, user_key.as_ref(), escrow_key.as_ref(), vrf_key.as_ref(), @@ -993,7 +994,7 @@ pub fn get_out_of_jail_handler(ctx: Context, params: RequestR } ``` -#### 10. Consume Randomness +### 10. Consume Randomness Now that we've built the logic to request a VRF from Switchboard, we must build the callback instruction the Switchboard program will call once the VRF has been @@ -1016,16 +1017,16 @@ three accounts. use crate::state::*; use crate::errors::*; use anchor_lang::prelude::*; -use switchboard_v2::VrfAccountData; +use switchboard_solana::VrfAccountData; #[derive(Accounts)] pub struct ConsumeRandomness<'info> { // burry escrow account #[account(mut)] - pub escrow_account: Account<'info, EscrowState>, + pub escrow_account: Account<'info, Escrow>, // vrf client state #[account(mut)] - pub vrf_state: AccountLoader<'info, VrfClientState>, + pub vrf_state: AccountLoader<'info, VrfClient>, // switchboard vrf account #[account( mut, @@ -1059,15 +1060,15 @@ pub fn consume_randomness_handler(ctx: Context) -> Result <() return Ok(()); } - Ok(()) + Ok(()) } ``` Then we load our `vrf_state` using `load_mut` since we'll be storing the randomness and dice rolls within it. We also want to check that the -`result_buffer` returned from the `vrf` does not match byte for byte the -`result_buffer` from the `vrf_state`. 
If they do match, we know the returned
-randomness is stale.
+`result_buffer` returned from the `vrf` does not match the `result_buffer` from
+the `vrf_state` byte for byte. If they do match, we know the returned randomness
+is stale.

```rust
pub fn consume_randomness_handler(ctx: Context) -> Result <()> {
@@ -1080,29 +1081,29 @@ pub fn consume_randomness_handler(ctx: Context) -> Result <()
    msg!("vrf buffer empty");
    return Ok(());
  }
-  // new code
+  // new code
  let vrf_state = &mut ctx.accounts.vrf_state.load_mut()?;
  if result_buffer == vrf_state.result_buffer {
    msg!("result_buffer unchanged");
    return Ok(());
  }

-  ...
-  ...
+  ...
+  ...
}
```

-Now it's time to actually use the random result. Since we only use two dice we
+Now it’s time to use the random result. Since we're working with two dice we
only need the first two bytes of the buffer.

To convert these random values into “dice rolls”, we use modular arithmetic.
For anyone not familiar with modular arithmetic,
-[Wikipedia can help](https://en.wikipedia.org/wiki/Modular_arithmetic). In
-modular arithmetic, numbers "wrap around" upon reaching a given fixed quantity.
-This given quantity is known as the modulus to leave as the remainder. Here, the
+[this Wikipedia article](https://en.wikipedia.org/wiki/Modular_arithmetic) provides a helpful introduction. In
+modular arithmetic, numbers "wrap around" when they reach a given fixed quantity.
+This given quantity is known as the modulus to leave as the remainder. In our case, the
modulus is the `dice_type` stored on the `vrf_state` account. We hard-coded this
to 6 when the account was initialized to represent a 6-sided die. When we use
-`dice_type`, or 6, as the modulus, our result will be a number 0-5. We then add
-one, to make the resulting possibilities 1-6.
+`dice_type`, or 6, as the modulus, our result will be a number between 0 and 5. We then add
+one to shift the range, making the possible outcomes 1-6.
```rust pub fn consume_randomness_handler(ctx: Context) -> Result <()> { @@ -1131,8 +1132,8 @@ pub fn consume_randomness_handler(ctx: Context) -> Result <() msg!("Current Die 1 Value [1 - {}) = {}!", dice_type, dice_1); msg!("Current Die 2 Value [1 - {}) = {}!", dice_type, dice_2); - ... - ... + ... + ... } ``` @@ -1187,8 +1188,8 @@ pub fn consume_randomness_handler(ctx: Context) -> Result <() if dice_1 == dice_2 { msg!("Rolled doubles, get out of jail free!"); - let escrow_state = &mut ctx.accounts.escrow_account; - escrow_state.out_of_jail = true; + let escrow = &mut ctx.accounts.escrow_account; + escrow.out_of_jail = true; } Ok(()) @@ -1199,7 +1200,7 @@ And that's it for the get-out-of-jail functionality! Congrats, you have just built a program that can consume Switchboard data feeds and submit VRF requests. Please make sure your program builds successfully by running `anchor build`. -#### 11. Testing +### 11. Testing Alright, let's test our program. Historically, we'd need to test the VRF on Devnet. Fortunately, the folks at Switchboard have created some really nice @@ -1211,13 +1212,13 @@ file: ```toml ## VRF ACCOUNTS -[[test.validator.clone]] # sbv2 attestation programID +[[test.validator.clone]] # Switchboard solana attestation programID address = "sbattyXrzedoNATfc4L31wC9Mhxsi1BmFhTiN8gDshx" -[[test.validator.clone]] # sbv2 attestation IDL +[[test.validator.clone]] # Switchboard solana attestation IDL address = "5ExuoQR69trmKQfB95fDsUGsUrrChbGq9PFgt8qouncz" -[[test.validator.clone]] # sbv2 SbState +[[test.validator.clone]] # Switchboard solana SbState address = "CyZuD7RPDcrqCGbNvLCyqk6Py9cEZTKmNKujfPi3ynDd" ``` @@ -1227,7 +1228,7 @@ imports, and adds a new function called `delay`. 
```typescript import * as anchor from "@coral-xyz/anchor"; -import { Program } from "@coral-xyz/anchor"; +import { Program, BN } from "@coral-xyz/anchor"; import { BurryEscrow } from "../target/types/burry_escrow"; import { Big } from "@switchboard-xyz/common"; import { @@ -1241,7 +1242,7 @@ import { import { NodeOracle } from "@switchboard-xyz/oracle"; import { assert } from "chai"; -export const solUsedSwitchboardFeed = new anchor.web3.PublicKey( +export const solUsdSwitchboardFeed = new anchor.web3.PublicKey( "GvDMxPzN1sCj7L26YDK2HnMRXEQmQ2aemov8YBtPS7vR", ); @@ -1265,51 +1266,51 @@ describe("burry-escrow-vrf", () => { ); const aggregatorAccount = new AggregatorAccount( switchboardProgram, - solUsedSwitchboardFeed, + solUsdSwitchboardFeed, ); // derive escrow state account - const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + const [Escrow] = await anchor.web3.PublicKey.findProgramAddressSync( [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], program.programId, ); - console.log("Escrow Account: ", escrowState.toBase58()); + console.log("Escrow Account: ", Escrow.toBase58()); // fetch latest SOL price const solPrice: Big | null = await aggregatorAccount.fetchLatestValue(); if (solPrice === null) { throw new Error("Aggregator holds no value"); } - const failUnlockPrice = solPrice.plus(10).toNumber(); - const amountToLockUp = new anchor.BN(100); + const failUnlockPrice = new BN(solPrice.plus(10).toNumber()); + const amountToLockUp = new BN(100); // Send transaction try { - const tx = await program.methods + const transaction = await program.methods .deposit(amountToLockUp, failUnlockPrice) .accounts({ user: payer.publicKey, - escrowAccount: escrowState, + escrowAccount: Escrow, systemProgram: anchor.web3.SystemProgram.programId, }) .signers([payer]) .rpc(); - await provider.connection.confirmTransaction(tx, "confirmed"); - console.log("Your transaction signature", tx); + await provider.connection.confirmTransaction(transaction, 
"confirmed"); + console.log("Your transaction signature", transaction); // Fetch the created account - const newAccount = await program.account.escrowState.fetch(escrowState); + const newAccount = await program.account.escrow.fetch(Escrow); const escrowBalance = await provider.connection.getBalance( - escrowState, + Escrow, "confirmed", ); console.log("Onchain unlock price:", newAccount.unlockPrice); console.log("Amount in escrow:", escrowBalance); // Check whether the data onchain is equal to local 'data' - assert(failUnlockPrice == newAccount.unlockPrice); + assert(failUnlockPrice.eq(newAccount.unlockPrice)); assert(escrowBalance > 0); } catch (error) { console.log(error); @@ -1321,31 +1322,31 @@ describe("burry-escrow-vrf", () => { let didFail = false; // derive escrow address - const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + const [Escrow] = await anchor.web3.PublicKey.findProgramAddressSync( [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], program.programId, ); - // send tx + // send transaction try { - const tx = await program.methods + const transaction = await program.methods .withdraw() .accounts({ user: payer.publicKey, - escrowAccount: escrowState, - feedAggregator: solUsedSwitchboardFeed, + escrowAccount: Escrow, + feedAggregator: solUsdSwitchboardFeed, systemProgram: anchor.web3.SystemProgram.programId, }) .signers([payer]) .rpc(); - await provider.connection.confirmTransaction(tx, "confirmed"); - console.log("Your transaction signature", tx); + await provider.connection.confirmTransaction(transaction, "confirmed"); + console.log("Your transaction signature", transaction); } catch (error) { didFail = true; assert( - error.message.includes( + error.errorMessage.includes( "Current SOL price is not above Escrow unlock price.", ), "Unexpected error message: " + error.message, @@ -1359,7 +1360,7 @@ describe("burry-escrow-vrf", () => { If you only want to run the vrf tests, change -`describe("burry-escrow-vrf", () => {` 
to: +`describe("burry-escrow-vrf", () => {` to: `describe.only("burry-escrow-vrf", () => {` @@ -1439,14 +1440,14 @@ describe.only("burry-escrow-vrf", () => { Now let's run the actual test. We'll structure the test to keep rolling dice until we get doubles, then we'll check that we can withdraw the funds. -First, we'll gather all of the accounts we need. The `switchboard` test context +First, we'll gather all the accounts we need. The `switchboard` test context gives us most of these. Then we'll need to call our `initVrfClient` function. Finally, we'll roll our dice in a loop and check for doubles. ```typescript it("Roll till you can withdraw", async () => { // derive escrow address - const [escrowState] = await anchor.web3.PublicKey.findProgramAddressSync( + const [Escrow] = await anchor.web3.PublicKey.findProgramAddressSync( [Buffer.from("MICHAEL BURRY"), payer.publicKey.toBuffer()], program.programId, ); @@ -1456,24 +1457,24 @@ it("Roll till you can withdraw", async () => { [ Buffer.from("VRFCLIENT"), payer.publicKey.toBytes(), - escrowState.toBytes(), + Escrow.toBytes(), vrfSecret.publicKey.toBytes(), ], program.programId, ); console.log(`VRF Client: ${vrfClientKey}`); - const vrfIxCoder = new anchor.BorshInstructionCoder(program.idl); + const vrfInstructionCoder = new anchor.BorshInstructionCoder(program.idl); const vrfClientCallback: Callback = { programId: program.programId, accounts: [ // ensure all accounts in consumeRandomness are populated // { pubkey: payer.publicKey, isSigner: false, isWritable: true }, - { pubkey: escrowState, isSigner: false, isWritable: true }, + { pubkey: Escrow, isSigner: false, isWritable: true }, { pubkey: vrfClientKey, isSigner: false, isWritable: true }, { pubkey: vrfSecret.publicKey, isSigner: false, isWritable: true }, ], - ixData: vrfIxCoder.encode("consumeRandomness", ""), // pass any params for instruction here + instructionData: vrfInstructionCoder.encode("consumeRandomness", ""), // pass any params for instruction here 
}; const queue = await switchboard.queue.loadData(); @@ -1507,11 +1508,11 @@ it("Roll till you can withdraw", async () => { // initialize vrf client try { - const tx = await program.methods + const transaction = await program.methods .initVrfClient() .accounts({ user: payer.publicKey, - escrowAccount: escrowState, + escrowAccount: Escrow, vrfState: vrfClientKey, vrf: vrfAccount.publicKey, systemProgram: anchor.web3.SystemProgram.programId, @@ -1527,7 +1528,7 @@ it("Roll till you can withdraw", async () => { while (!rolledDoubles) { try { // Request randomness and roll dice - const tx = await program.methods + const transaction = await program.methods .getOutOfJail({ switchboardStateBump: switchboard.program.programState.bump, permissionBump, @@ -1537,7 +1538,7 @@ it("Roll till you can withdraw", async () => { vrf: vrfAccount.publicKey, user: payer.publicKey, payerWallet: payerTokenWallet, - escrowAccount: escrowState, + escrowAccount: Escrow, oracleQueue: switchboard.queue.publicKey, queueAuthority: queue.authority, dataBuffer: queue.dataBuffer, @@ -1553,18 +1554,18 @@ it("Roll till you can withdraw", async () => { .signers([payer]) .rpc(); - await provider.connection.confirmTransaction(tx, "confirmed"); + await provider.connection.confirmTransaction(transaction, "confirmed"); console.log(`Created VrfClient Account: ${vrfClientKey}`); - // wait a few sec for switchboard to generate the random number and invoke callback ix + // wait a few seconds for switchboard to generate the random number and invoke the callback instruction console.log("Rolling Die..."); let didUpdate = false; - let vrfState = await program.account.vrfClientState.fetch(vrfClientKey); + let vrfState = await program.account.vrfClient.fetch(vrfClientKey); while (!didUpdate) { console.log("Checking die..."); - vrfState = await program.account.vrfClientState.fetch(vrfClientKey); + vrfState = await program.account.vrfClient.fetch(vrfClientKey); didUpdate = vrfState.timestamp.toNumber() > 0; await 
delay(1000);
      }
@@ -1587,18 +1588,18 @@
      }
    }

-    const tx = await program.methods
+    const transaction = await program.methods
      .withdraw()
      .accounts({
        user: payer.publicKey,
-        escrowAccount: escrowState,
+        escrowAccount: Escrow,
-        feedAggregator: solUsedSwitchboardFeed,
+        feedAggregator: solUsdSwitchboardFeed,
        systemProgram: anchor.web3.SystemProgram.programId,
      })
      .signers([payer])
      .rpc();

-    await provider.connection.confirmTransaction(tx, "confirmed");
+    await provider.connection.confirmTransaction(transaction, "confirmed");
  });
```

@@ -1620,7 +1621,7 @@ And there you have it! You should be able to run and pass all of the tests using

If something is not working, go back and find where you went wrong.
Alternatively feel free to try out the
-[solution code on the `vrf` branch](https://github.com/Unboxed-Software/michael-burry-escrow/tree/vrf).
+[solution code on the `vrf` branch](https://github.com/solana-developers/burry-escrow/tree/vrf).

Remember to update your program keys and wallet path like we did in the
[the Setup step](#1-program-setup).

@@ -1633,7 +1634,7 @@ they roll 3 times without rolling doubles, they should be able to withdraw their
funds, just like getting out of jail in Monopoly.

If you get stuck, we have the solution in the
-[`vrf-challenge-solution` branch](https://github.com/Unboxed-Software/michael-burry-escrow/tree/vrf-challenge-solution).
+[`vrf-challenge-solution` branch](https://github.com/solana-developers/burry-escrow/tree/vrf-challenge-solution).

Push your code to GitHub and
diff --git a/content/courses/intro-to-solana/getting-started.md b/content/courses/intro-to-solana/getting-started.md
index d2e5e3a73..15565d26c 100644
--- a/content/courses/intro-to-solana/getting-started.md
+++ b/content/courses/intro-to-solana/getting-started.md
@@ -8,7 +8,7 @@ objectives:
 description: "Understand what web3, blockchains, and Solana are."
 ---

-## Welcome!
+## Welcome Welcome to the best starting point for developers looking to learn web3 and blockchain! diff --git a/content/courses/intro-to-solana/interact-with-wallets.md b/content/courses/intro-to-solana/interact-with-wallets.md index 27738df9f..76cfc9db1 100644 --- a/content/courses/intro-to-solana/interact-with-wallets.md +++ b/content/courses/intro-to-solana/interact-with-wallets.md @@ -113,12 +113,13 @@ ensure this is to wrap your entire app in `ConnectionProvider` and ```tsx import { NextPage } from "next"; -import { FC, ReactNode } from "react"; +import { FC, ReactNode, useMemo } from "react"; import { ConnectionProvider, WalletProvider, } from "@solana/wallet-adapter-react"; import { clusterApiUrl } from "@solana/web3.js"; +import "@solana/wallet-adapter-react-ui/styles.css"; export const Home: NextPage = props => { const endpoint = clusterApiUrl("devnet"); @@ -158,7 +159,7 @@ full-featured wallet experience is to use `WalletModalProvider` and ```tsx import { NextPage } from "next"; -import { FC, ReactNode } from "react"; +import { FC, ReactNode, useMemo } from "react"; import { ConnectionProvider, WalletProvider, @@ -173,6 +174,7 @@ import { PublicKey, SystemProgram, } from "@solana/web3.js"; +import "@solana/wallet-adapter-react-ui/styles.css"; const Home: NextPage = props => { const endpoint = clusterApiUrl("devnet"); @@ -265,7 +267,7 @@ export const BalanceDisplay: FC = () => { return (
-

{publicKey ? `Balance: ${balance / LAMPORTS_PER_SOL} SOL` : ""}

+

{publicKey ? `Balance: ${balance} SOL` : ""}

); }; @@ -346,6 +348,8 @@ using in this lab. ### Download the starter code + + Download the [starter code for this project](https://github.com/Unboxed-Software/solana-ping-frontend/tree/starter). This project is a simple Next.js application. It's mostly empty except for the @@ -422,7 +426,7 @@ To complete this component, add ensure proper styling and behavior of the Wallet Adapter library components. ```tsx -import { FC, ReactNode } from "react"; +import { FC, ReactNode, useMemo } from "react"; import { ConnectionProvider, WalletProvider, diff --git a/content/courses/intro-to-solana/intro-to-cryptography.md b/content/courses/intro-to-solana/intro-to-cryptography.md index 6f1c3d83b..396e3032a 100644 --- a/content/courses/intro-to-solana/intro-to-cryptography.md +++ b/content/courses/intro-to-solana/intro-to-cryptography.md @@ -20,6 +20,9 @@ description: "Understand asymmetric cryptography and how Solana uses it." ## Lesson +In this lesson, we will explore the basics of cryptography and how it's applied +within the Solana ecosystem. + ### Symmetric and Asymmetric Cryptography 'Cryptography' the study of hiding information. There are two main types of @@ -157,6 +160,9 @@ You know how to make and load keypairs! Let's practice what we've learned. ## Lab +In this lab we will learn about keypairs, and how to store secret keys securely +on solana + ### Installation Make a new directory, install TypeScript, Solana web3.js and esrun: diff --git a/content/courses/intro-to-solana/intro-to-custom-onchain-programs.md b/content/courses/intro-to-solana/intro-to-custom-onchain-programs.md index c0b0b3400..8ad09db6e 100644 --- a/content/courses/intro-to-solana/intro-to-custom-onchain-programs.md +++ b/content/courses/intro-to-solana/intro-to-custom-onchain-programs.md @@ -17,15 +17,9 @@ invoked in the onchain program. 
### Instructions -In previous chapters, we used: - -- The `SystemProgram.transfer()` function from `@solana/web3.js` to make an - instruction for the System program to transfer SOL. -- The `mintTo()` and `transfer()` functions from `@solana/spl-token`, to make - instructions to the Token program to mint and transfer tokens -- The `createCreateMetadataAccountV3Instruction()` function from - `@metaplex-foundation/mpl-token-metadata@2` to make instructions to Metaplex - to create token Metadata. +In previous lessons, we used the `SystemProgram.transfer()` function from +`@solana/web3.js`, which creates an instruction for the System program to +transfer SOL. When working with other programs, however, you'll need to create instructions manually. With `@solana/web3.js`, you can create instructions with the diff --git a/content/courses/intro-to-solana/intro-to-writing-data.md b/content/courses/intro-to-solana/intro-to-writing-data.md index deede360e..3bff43d78 100644 --- a/content/courses/intro-to-solana/intro-to-writing-data.md +++ b/content/courses/intro-to-solana/intro-to-writing-data.md @@ -235,7 +235,7 @@ console.log( console.log(`Transaction signature is ${signature}!`); ``` -### Experiment! +### Experiment Send SOL to other students in the class. 
diff --git a/content/courses/mobile/intro-to-solana-mobile.md b/content/courses/mobile/intro-to-solana-mobile.md index cf6a9002b..37dcad0c5 100644 --- a/content/courses/mobile/intro-to-solana-mobile.md +++ b/content/courses/mobile/intro-to-solana-mobile.md @@ -1,50 +1,65 @@ --- title: Introduction to Solana Mobile objectives: - - Explain the benefits of creating mobile-first dApp experiences + - Explain the benefits of creating mobile-first App experiences - Explain the high-level Mobile Wallet Adapter (MWA) flow - Explain the high-level differences between React and React Native - - Create a simple Android Solana dApp using React Native + - Create a simple Android Solana App using React Native description: "Learn how to build native mobile apps using blockchain functionality" --- ## Summary -- The Solana Mobile Wallet Adapter (MWA) creates a web socket connection between - mobile apps and mobile wallets, allowing native mobile apps to submit - transactions for signing -- The simplest way to get started creating Solana mobile applications is with - Solana Mobile's - [React Native packages](https://docs.solanamobile.com/react-native/setup) +- The **Solana Mobile Wallet Adapter** (**MWA**) allows mobile apps to submit + transactions for signing via a WebSocket connection to mobile wallets. +- The easiest way to start building Solana mobile applications is by using + Solana Mobile’s + [React Native packages](https://docs.solanamobile.com/react-native/setup) - `@solana-mobile/mobile-wallet-adapter-protocol` and `@solana-mobile/mobile-wallet-adapter-protocol-web3js` -- React Native is very similar to React with a few mobile quirks -## Lesson +## Lesson Overview -Solana Mobile Stack (SMS) is designed to help developers create mobile dApps -with a seamless UX. 
It consists of the -[Mobile Wallet Adapter (MWA)](https://docs.solanamobile.com/getting-started/overview#mobile-wallet-adapter), +In these lessons, we will develop mobile apps that interact with the Solana +network, this opens up a whole new paradigm of blockchain use cases and +behaviors. The **Solana Mobile Stack** (**SMS**) is designed to help developers +seamlessly create mobile apps. It includes the +[Mobile Wallet Adapter (MWA)](https://docs.solanamobile.com/getting-started/overview#mobile-wallet-adapter) +, a Solana Mobile SDK that uses React Native, [Seed Vault](https://docs.solanamobile.com/getting-started/overview#seed-vault), and the -[Solana dApp Store](https://docs.solanamobile.com/getting-started/overview#solana-dapp-store). - -Most relevant to your development journey is the Mobile Wallet Adapter (MWA). -The simplest way to get started is to use the Mobile Wallet Adapter with React -Native to create a simple Android app. This lesson assumes you're familiar with -React and Solana programming. If that's not the case, -[start our course from the beginning](/content/courses/intro-to-solana/intro-to-cryptography) -and come back here when you feel ready! - -### Intro To Solana Mobile - -In these units, we'll develop mobile apps that interact with the Solana network. -This opens up a whole new paradigm of crypto use cases and behaviors. +[Solana app Store](https://docs.solanamobile.com/getting-started/overview#solana-app-store). +These resources simplify mobile development with a similar experience but with +mobile-specific features. + +This lesson focuses on using React Native to create a simple Android app that +integrates with the Solana network. If you're not familiar with programming in +React or Solana, we recommend starting with our +[Intro to Solana lesson](https://github.com/solana-foundation/developer-content/tree/main/content/courses/intro-to-solana) +and returning when you're ready. If you are, let's dive in! 
+ +## Intro to Solana Mobile + +Native mobile wallets hold your private keys and use them to sign and send +transactions just like web extension wallets. However native mobile wallets use +the +[Mobile Wallet Adapter](https://github.com/solana-mobile/mobile-wallet-adapter) +(MWA) standard instead of the +[Wallet Adapter](https://github.com/anza-xyz/wallet-adapter) to ensure any apps +can work with any wallet. + +We will dig into the specifics of the MWA in a +[later lesson](/content/courses/mobile/mwa-deep-dive), but it effectively opens +a WebSocket between applications to facilitate communication. That way a +separate app can provide the wallet app with the transaction to be signed and +sent, and the wallet app can respond with appropriate status updates. -#### Solana Mobile Use Cases +### Mobile Use Cases with Solana -Here are a few examples of what Solana mobile development can unlock: +Before development, it is important to understand the current landscape of Web3 +mobile development to foresee potential blockers and opportunities. Here are a +few examples of what Solana mobile development can unlock: **Mobile Banking and Trading (DeFi)** @@ -66,55 +81,22 @@ SMS can enable a new wave of mobile e-commerce shoppers to pay directly from their favorite Solana wallet. Imagine a world where you can use your Solana wallet as seamlessly as you can use Apple Pay. -To summarize, mobile crypto opens up many doors. Let's dive in and learn how we -can be part of it: - -#### How Solana development differs between native mobile apps and web +In summary, mobile blockchain transactions can open many opportunities. Let's +start building! -Solana wallet interaction differs slightly on mobile compared to the web. The -core wallet functionality is the same: the wallet holds your private keys and -uses them to sign and send transactions. To avoid having different interfaces -between wallets, developers abstracted that functionality into the Solana Wallet -Adapter standard. 
This remains the standard on the web. The mobile counterpart -is the Mobile Wallet Adapter (MWA). +### Supported Operating Systems -The differences between the two standards are due to the different construction -of web vs mobile wallets. Web wallets are just browser extensions that inject -wallet adapter functions into the `window` object of your webpage. This gives -your site access to them. Mobile wallets, however, are native applications on a -mobile operating system. There's no way to surface functions from one native -application to another. The Mobile Wallet Adapter exists to enable any app, -written in any language, to connect to a native wallet app. +Currently, the MWA only supports Android. On Android, a WebSocket connection can +persist between apps, even when the wallet app is in the background. -We'll dig into the specifics of the Mobile Wallet Adapter in a -[later lesson](/content/courses/mobile/mwa-deep-dive), but it effectively opens -a WebSocket between applications to facilitate communication. That way a -separate app can provide the wallet app with the transaction to be signed and -sent, and the wallet app can respond with appropriate status updates. - -#### Supported Operating Systems - -At the time of writing, Android is the only mobile OS supported by the Mobile -Wallet Adapter. - -On Android, a WebSocket connection can persist between apps, even when the -wallet app is in the background. - -On iOS, the lifetime of a connection between apps is purposefully limited by the -operating system. Specifically, iOS will quickly suspend connections when an app -is pushed to the background. This kills the MWA WebSocket connection. This is an -inherent design difference between iOS and Android (probably made to preserve -battery, network usage, etc). - -However, this doesn't mean that Solana dApps can't run on iOS at all. You can -still create a Mobile Web App using the -[standard wallet adapter](https://github.com/solana-labs/wallet-adapter) -library. 
Your users can then install a mobile-friendly wallet like -the [Glow Wallet](https://glow.app/). +On iOS, the OS quickly suspends websocket connections when an app is +backgrounded, so the standard +[Wallet Adapter](https://github.com/solana-labs/wallet-adapter) library is used +instead. The remainder of this lesson will focus on developing Android apps with the MWA. -#### Supported Frameworks +### Supported Frameworks Solana Mobile supports a number of different frameworks. Officially supported are React Native and native Android, with community SDKs for Flutter, Unity, and @@ -132,297 +114,316 @@ Unreal Engine. - [Unity](https://docs.solanamobile.com/unity/unity_sdk) - [Unreal Engine](https://docs.solanamobile.com/unreal/unreal_sdk) -To keep the development experience as close as possible to other lessons, we'll -be working exclusively with React Native. - -### From React to React Native - -React Native takes the React web framework and applies it to mobile -applications. However, while React and React Native feel very similar, there are -differences. The best way to understand these differences is to experience them -while coding. But, to give you a head start here is a list of some differences -to keep in mind: - -- React Native compiles down to native iOS and Android applications while React - compiles down to a collection of web pages. -- In React, you use JSX to program with HTML and CSS. With React Native, you use - similar syntax to manipulate native UI components. It's more like using a UI - library like Chakra or Tailwind UI. Instead of `
`, `

`, and `` - you'll be using ``, ``, and ``. -- Interactions are different. Instead of `onClick`, you'll use `onPress` and - other gestures. -- Many standard React and Node packages may not be compatible with React Native. - Fortunately, there are React Native counterparts to the most popular libraries - and you can often use polyfills to make Node packages available. If you're not - familiar with polyfills, take a look at the - [MDN docs](https://developer.mozilla.org/en-US/docs/Glossary/Polyfill). In - short, polyfills actively replace Node-native libraries to make them work - anywhere Node is not running. -- Setting up a development environment in React Native can be challenging. This - will require setting up Android Studio to compile to Android and XCode for - iOS. React Native has a - [really good guide](https://reactnative.dev/docs/environment-setup?guide=native) - for this. -- For regular development and testing, you'll use a physical mobile device or an - emulator to run your code. This relies on a tool called Metro that comes - pre-installed. React Native's setup guide also covers this. -- React Native gives you access to the phone's hardware that React can't - provide. This includes things like the phone's camera, accelerometer, and - more. -- React Native introduces new config files and build folders. For example, the - `ios` and `android` directories contain platform-specific information. - Additionally, there are config files like `Gemfile` and `metro.config.js`. - Generally, leave all configurations alone and just worry about writing your - code, the starting point for which will be in `App.tsx`. - -There is a learning curve, but if you know React you're not nearly as far from -being able to develop mobile apps as you think! It may feel jarring to start, -but after a few hours of React Native development, you'll start to feel much -more comfortable. You'll likely feel much more confident even after -[this lesson's lab](#lab). 
- -### Creating a Solana dApp with React Native - -Solana React Native dApps are virtually identical to React dApps. The primary +To keep the development experience as close as possible to other lessons, we +will be working exclusively with React Native. + +## From React to React Native + +React Native is very similar to React but designed for mobile. Here are some key +points to note: + +- React Native compiles down to native Android and iOS apps while React compiles + down to a collection of web pages. +- Instead of using web elements like `

`, you will use mobile-native + elements like ``. +- React Native allows access to mobile hardware, such as the camera and + accelerometer, which React web apps cannot access. +- Many standard React and Node packages may not be compatible with React Native + and setting up React Native can be challenging. Fortunately, the + [React Native Docs](https://reactnative.dev/docs/environment-setup?guide=native) + contains everything you may need. +- For development, you will need to set up + [Android Studio](https://developer.android.com/studio/intro/) for Android apps + and an emulator or physical device for testing. + + +**NOTE:** There is a learning curve, but if you know React you're not nearly as far from being able to develop mobile apps as you think! It may feel jarring to start, but after a few hours of React Native development, you will start to feel much more comfortable. We have included a [Lab](#lab) section below to help you. + + +## Creating a React Native App on Solana + +Solana React Native apps are virtually identical to React apps. The primary difference is in the wallet interaction. Instead of the wallet being available -in the browser, your dApp will create an MWA session with the wallet app of your +in the browser, your app will create an MWA session with the wallet app of your choosing using a WebSocket. Fortunately, this is abstracted for you in the MWA -library. The only difference you'll need to know is anytime you need to make a -call to the wallet you'll be using the `transact` function, which we'll talk -about soon. +library. The only difference is that anytime you need to make a call to the +wallet, the `transact` function will be used, more details on this function in +later parts of this lesson. 
-![dApp Flow](/public/assets/courses/unboxed/basic-solana-mobile-flow.png) +![App Flow](/public/assets/courses/unboxed/basic-solana-mobile-flow.png) -#### Reading data +## Reading Data -Reading data from a Solana cluster in React Native is the exact same as in -React. You use the `useConnection` hook to grab the `Connection` object. Using -that, you can get account info. Since reading is free, we don't need to actually -connect to the wallet. +Reading data from a Solana cluster in React Native works the same way as in +React. You can use the `useConnection` hook to access the `connection` object, +which is responsible for interacting with the Solana network. -```tsx -const account = await connection.getAccountInfo(account); +In Solana, an account refers to any object stored on-chain, and is typically +referenced by a +[public key](https://solana.com/docs/terminology#public-key-pubkey). + +Here’s an example of how you can read an account information using the +`getAccountInfo` method: + +```javascript +const { connection } = useConnection(); +const publicKey = new PublicKey("your-wallet-public-key-here"); // Replace with a valid public key +const account = await connection.getAccountInfo(publicKey); ``` -If you need a refresher on this, check out our -[lesson on reading data from the blockchain](/content/courses/intro-to-solana/intro-to-reading-data). +> **NOTE:** If you need a refresher, refer to our +> [Intro to Reading Data lesson](/content/courses/intro-to-solana/intro-to-reading-data). -#### Connecting to a wallet +## Connecting to a Wallet -Writing data to the blockchain has to happen through a transaction. Transactions -have to be signed by one or more private keys and sent to an RPC provider. This -virtually always happens through a wallet application. +When writing data to the blockchain, it must be done through a **transaction**. 
+Transactions need to be signed by one or more secret keys (previously referred +to as private keys) and sent to an +[RPC provider](https://academy.subquery.network/subquery_network/node_operators/rpc_providers/introduction.html) +for processing. In almost all cases, this interaction is facilitated through a +wallet application. -Typical wallet interaction happens by calling out to a browser extension. On -mobile, you use a WebSocket to start an MWA session. Specifically, you use -Android intents where the dApp broadcasts its intent with the `solana-wallet://` -scheme. +### Web vs. Mobile Wallet Interactions +The websocket that connects the app and the wallet is managed using the MWA, and +initiated using **Android intents**, with the dApp broadcasting its intent using +the `solana-wallet://` scheme. ![Connecting](/public/assets/courses/unboxed/basic-solana-mobile-connect.png) -When the wallet app receives this intent, it opens a connection with the dApp -that initiated the session. Your dApp sends this intent using the `transact` -function: +When the wallet app receives the intent broadcast, it opens a WebSocket +connection with the app that initiated the session. The app initiates this +connection using the `transact` function, as shown below: ```tsx transact(async (wallet: Web3MobileWallet) => { - // Wallet Action code here -} + // Your wallet action code goes here +}); ``` -This will give you access to the `Web3MobileWallet` object. You can then use -this to send transactions to the wallet. Again, when you want to access the -wallet, it has to be through the function `transact` function's callback. +This function provides access to the `Web3MobileWallet` object, allowing you to +perform actions such as signing transactions or interacting with wallet data. +Remember, all wallet interactions must occur inside the callback of the +`transact` function. 
-#### Signing and sending transactions +### Signing and sending transactions -Sending a transaction happens inside the `transact` callback. The flow is as -follows: +The overall flow for signing and sending a transaction is as follows: -1. Establish a session with a wallet using `transact` which will have a callback - of `async (wallet: Web3MobileWallet) => {...}`. -2. Inside the callback, request authorization with the `wallet.authorize` or - `wallet.reauthorize` method depending on the state of the wallet. -3. Sign the transaction with `wallet.signTransactions` or sign and send with - `wallet.signAndSendTransactions`. +- Use the `transact` function to establish a session with the wallet. This + function takes an asynchronous callback: + `async (wallet: Web3MobileWallet) => {...}`. +- Inside the callback, request wallet authorization using `wallet.authorize()` + or `wallet.reauthorize()`, depending on the wallet's state (whether it has an + active session or requires reauthorization). +- Once the wallet is authorized, you can either: + - Sign the transaction using `wallet.signTransactions()`, or + - Sign and send the transaction directly using + `wallet.signAndSendTransactions()`. ![Transacting](/public/assets/courses/unboxed/basic-solana-mobile-transact.png) +To manage the wallet's authorization state, consider creating a +`useAuthorization()` hook. This hook can streamline the process of handling +authorization within your app, especially if you have multiple interactions with +the wallet. -You may want to create a `useAuthorization()` hook to -manage the wallet's authorization state. We'll practice this in the -[Lab](#lab). +> We will explore the use of this hook and practice managing the wallet's state +> in more detail during the lab exercises. 
Here is an example of sending a transaction using MWA: ```tsx +//import required dependencies if any + const { authorizeSession } = useAuthorization(); const { connection } = useConnection(); -const sendTransactions = (transaction: Transaction) => { - transact(async (wallet: Web3MobileWallet) => { - const latestBlockhashResult = await connection.getLatestBlockhash(); - const authResult = await authorizeSession(wallet); - - const updatedTransaction = new Transaction({ - ...transaction, - ...latestBlockhashResult, - feePayer: authResult.publicKey, - }); +const sendTransactions = async (transaction: Transaction) => { + try { + // Start a session with the wallet + await transact(async (wallet: Web3MobileWallet) => { + // Get the latest blockhash for the transaction + const { blockhash, lastValidBlockHeight } = + await connection.getLatestBlockhash(); + + // Authorize the wallet session + const authResult = await authorizeSession(wallet); + + // Create an updated transaction with the latest blockhash and feePayer + const updatedTransaction = new Transaction({ + recentBlockhash: blockhash, + feePayer: authResult.publicKey, + }).add(transaction); + + // Sign and send the transaction via the wallet + const signatures = await wallet.signAndSendTransactions({ + transactions: [updatedTransaction], + }); - const signature = await wallet.signAndSendTransactions({ - transactions: [transaction], + console.log(`Transaction successful! Signature: ${signatures[0]}`); }); - }); + } catch (error) { + console.error("Error sending transaction:", error); + throw new Error("Transaction failed"); + } }; ``` -#### Debugging - -Since two applications are involved in sending transactions, debugging can be -tricky. Specifically, you won't be able to see the wallet's debug logs the way -you can see your dApps logs. - -Fortunately, -[Logcat on Android Studio](https://developer.android.com/studio/debug/logcat) -makes it possible to see logs from all applications on your device. 
- -If you prefer not to use Logcat, the other method you could try is to only use -the wallet to sign transactions, and then send them in your code. This allows -you to better debug the transaction if you're running into problems. - -#### Releasing - -Deploying mobile applications can be difficult on its own. It's often even more -difficult when it's a crypto app. There are two main reasons for this: customer -safety and financial incentives. - -First, most of the mobile app marketplaces have policies restricting blockchain -involvement. Crypto is new enough that it's a regulatory wildcard. Platforms -feel they're protecting users by being strict with blockchain-related apps. - -Second, if you use crypto for "purchases" in-app, you'll be seen as -circumnavigating the platform's fee (anywhere from 15-30%). This is explicitly -against app store policies as the platform is trying to protect its revenue -stream. - -These are hurdles for sure, but there's hope. Here are some things to keep in -mind for each marketplace: - -- **App Store (iOS) -** We only talked about Android today for the technical MWA - reason. However, their policies are also some of the most strict and make it - hard for Solana dApps to exist. For now, Apple has some pretty strict - anti-crypto policies. Wallets seem to be fine, but they'll flag and likely - reject anything that seems like a purchase using crypto. -- **Google Play (Android) -** Google is generally more relaxed, but there are - still a few things to be aware of. As of this writing in November ‘23, Google - is rolling out - [new crypto policies](https://www.theverge.com/2023/7/12/23792720/android-google-play-blockchain-crypto-nft-apps) - to make it more clear what they will and will not allow. Take a look. 
-- **Steam -** Does not allow crypto games at all - > “built on blockchain technology that issue or allow the exchange of - > cryptocurrencies or NFTs.” -- **Download Sites / Your Site -** Depending on the target platform, you can - make your dApp available for download on your own site. However, most users - are wary of downloading mobile applications from websites. -- **dApp Store (Solana) -** Solana saw the issues with mobile dApp distribution - on other platform app stores and decided to make their own. As part of the SMS - stack, they created the - [Solana dApp Store](https://docs.solanamobile.com/getting-started/overview#solana-dapp-store). - -### Conclusion - -Getting started with mobile Solana development is fairly straightforward thanks -to SMS. While React Native is slightly different than React, the code you have -to write is more similar than different. The primary difference is that the -portion of your code that interacts with wallets will exist within the -`transact` callback. Remember to look at our other lessons if you need a -refresher on Solana development more broadly. - -## Lab - -Let's practice this together by building a simple Android mobile counter dApp -with React Native. The app will interact with the Anchor counter program that we -made in the -[Intro to client-side Anchor development](https://www.soldev.app/course/intro-to-anchor-frontend) -lesson. This dApp simply displays a counter and allows users to increment the -count through a Solana program. In this app, we'll be able to see the current -count, connect our wallet, and increment the count. We'll be doing this all on -Devnet and will be compiling only for Android. - -This program already exists and is already deployed on Devnet. Feel free to -check out the -[deployed program's code](https://github.com/Unboxed-Software/anchor-ping-frontend/tree/solution-decrement) -if you want more context. - -We'll write this application in vanilla React Native without a starting -template. 
Solana Mobile provides a -[React Native template](https://docs.solanamobile.com/react-native/react-native-scaffold) -that shortcuts some of the boilerplate, but there's no better way to learn than -to do something from scratch. - -#### 0. Prerequisites - -React Native allows us to write mobile applications using similar patterns as -React. However, under the hood, our React code needs to be compiled down to -languages and frameworks that work with the device's native OS. This requires a -few prerequisite setup items: - -1. [Set up a React Native dev environment](https://reactnative.dev/docs/environment-setup?guide=native#creating-a-new-application). - Go through the - [**_entire article_**](https://reactnative.dev/docs/environment-setup?guide=native#creating-a-new-application), - using Android as the target OS. For convenience, we've typed out the - high-level steps below. Keep in mind that the source article might change - from the time of writing to when you're reading this. The source article is - your source of truth here. - - 1. Install dependencies - 2. Install Android Studio - 3. Configure **ANDROID_HOME** environment variable - 4. Create a new sample project (this is only used to set up the emulator) - - 1. If you run into the error `✖ Copying template`, add the `--npm` flag - at the end - - ```bash - npx react-native@latest init AwesomeProject - ✔ Downloading template - ✖ Copying template - - npx react-native@latest init AwesomeProject --npm - ✔ Downloading template - ✔ Copying template - ``` - - 5. Run and compile the sample project on your emulator - -2. Install and run the Solana fake wallet - - 1. Install the repo - - ```bash - git clone https://github.com/solana-mobile/mobile-wallet-adapter.git - ``` - - 2. In Android - Studio, `Open project > Navigate to the cloned directory > Select mobile-wallet-adapter/android` - 3. 
After Android Studio finishes loading the project, select `fakewallet` in - the build/run configuration dropdown in the top right - - ![Fake Wallet](/public/assets/courses/unboxed/basic-solana-mobile-fake-wallet.png) - - 4. For debugging, you'll want to use `Logcat`. Now that your fake wallet is - running on the emulator, go to `View -> Tool Windows -> Logcat`. This will - open up a console logging out what's happening with fake wallet. - -3. (Optional) Install other - [Solana wallets](https://solana.com/ecosystem/explore?categories=wallet) on - the Google Play store. - -Lastly, if you run into Java versioning issues - you'll want to be on Java -version 11. To check what you're currently running type `java --version` in your -terminal. - -#### 1. Plan out the App's Structure +## Debugging + +Debugging can be challenging when working with Solana mobile transactions, as +two separate applications are involved: your app and the mobile wallet. Unlike +typical single-application setups, you won't have direct access to the wallet’s +logs, which makes tracking issues more complex. + +However, Android Studio’s +[Logcat](https://developer.android.com/studio/debug/logcat) provides a useful +solution - enabling you to view logs from all applications running on your +device including the wallet. By leveraging Logcat, you can monitor the +interaction between your app and the wallet, helping you identify any issues +that arise during transaction signing and submission. + +If Logcat is not your preferred tool, an alternative approach is to use the +wallet solely for signing transactions, while handling the actual transaction +submission in your app’s code. This method allows for greater control over +debugging, as you can inspect the transaction flow more thoroughly on the client +side. + +## Deploying for Solana Mobile + +Deploying mobile applications can be challenging, and the complexity increases +when dealing with blockchain-based apps. 
Two primary factors contribute to this
+difficulty: customer safety and financial incentives.
+
+### Customer Safety and Regulatory Uncertainty
+
+Most mobile app marketplaces, such as the Apple App Store and Google Play Store,
+have policies that restrict blockchain-related apps. Since blockchain is still a
+relatively new and evolving technology, platforms are cautious about regulatory
+compliance. They often adopt strict guidelines to protect users from potential
+risks associated with blockchain apps.
+
+### In-App Purchases and Platform Fees
+
+Another significant challenge arises when using blockchain transactions for
+in-app purchases. Many platforms impose a transaction fee on purchases made
+within their apps (ranging from 15% to 30%). Payment via the blockchain is often
+seen as a way to bypass these fees, which is explicitly prohibited by most app
+stores. These platforms prioritize protecting their revenue streams and
+therefore enforce strict policies against apps that facilitate blockchain
+payments for in-app purchases.
+
+> While traditional app stores impose strict policies around blockchain
+> transactions to protect their revenue and comply with regulations, alternative
+> distribution methods like the Solana dApp Store offer developers a more
+> flexible platform for deploying Solana-based mobile applications. This
+> decentralized approach bypasses many of the restrictions seen in centralized
+> app marketplaces, allowing apps to thrive in a more blockchain-friendly
+> ecosystem.
+
+## Conclusion
+
+Getting started with Solana mobile development is more accessible than ever,
+thanks to the Solana Mobile Stack (SMS). Although React Native introduces some
+differences compared to React, much of the code you will write remains familiar,
+particularly when it comes to structuring the UI and handling state. 
The main +distinction lies in how you interact with wallets, which requires using the +`transact` callback to establish wallet sessions, sign transactions, and +communicate with Solana’s blockchain. + +As you continue building Solana mobile apps, it's essential to keep learning and +refining your skills. Be sure to explore additional resources like: + +- [The official Solana Developer Docs](https://solana.com/docs) for in-depth + guides on Solana’s core libraries and best practices. + +- [Solana Stack Exchange](https://solana.stackexchange.com/) forum for + troubleshooting, sharing insights, and staying updated on the latest ecosystem + changes. + +Mastering mobile Solana development will open up new opportunities in +decentralized finance (DeFi), gaming, and e-commerce, allowing you to build +cutting-edge applications with a seamless user experience. Stay curious and +experiment with different tools to push the boundaries of what you can achieve +with mobile apps. Let's put our knowledge to test by building a counting app +with React Native for Android OS! + +## Lab: Building a Mobile Counter app with React Native + +This app will display a counter and allow users to make increments via a +transaction on the Solana blockchain. The app will also connect to a wallet for +signing transactions. + +We will use the **Anchor framework** to interact with the on-chain counter +program. The client side has already been developed in one of our previous +lessons called +[Intro to client-side Anchor development](https://solana.com/developers/courses/onchain-development/intro-to-anchor-frontend), +feel free to check out its code for more context. + +To ensure you fully understand the core concepts, we will write this application +in vanilla React Native without a starting template. While Solana Mobile offers +templates that handle some boilerplate, building from scratch provides a much +deeper understanding. 
+ +### Getting Started + +To get started, you will need to properly set up a React Native development +environment if you didn't already. This +[article](https://reactnative.dev/docs/set-up-your-environment) shows you how. +Remember that this step is not required if you are using a +[Framework](https://reactnative.dev/architecture/glossary#react-native-framework). + +Ensure you have [Node.js](https://nodejs.org/en/download) installed on your +system. These will manage your JavaScript packages. Install Android Studio: + +Android Studio is required to run the Android emulator and to compile your React +Native app for Android devices. Configure the ANDROID_HOME Environment Variable: + +> **NOTE:** You will need to configure the `ANDROID_HOME` environment variable +> so that your terminal can recognize Android’s SDK tools. This step is critical +> for running and building your app on Android. + +## Project Setup + +Create a Sample Project for the Emulator Setup to ensure your Android +environment is set up correctly. In your terminal, run the code below within +your preferred directory to scaffold a new React Native project, where +`SampleProject` is your preferred project name. You can open the project in +Android Studio and ensure it runs correctly on the Android emulator. + +```bash + npx react-native init SampleProject --npm +``` + +### Cloning and Running MWA + +1. Clone the repo in `SampleProject` + + ```bash + git clone https://github.com/solana-mobile/mobile-wallet-adapter.git + ``` + +2. In Android Studio, _Open project > Navigate to the cloned directory > Select + mobile-wallet-adapter/android_ +3. After Android Studio finishes loading the project, select `fakewallet` in + the build/run configuration dropdown in the top right + + ![Fake Wallet](/public/assets/courses/unboxed/basic-solana-mobile-fake-wallet.png) + +4. For easier debugging, use **Logcat**. 
Check the + [Logcat installation guide](https://developer.android.com/studio/debug/logcat) + if you are interested. +5. Now that your fake wallet is running on the emulator, go to _View -> Tool + Windows -> Logcat_. This will open up a console logging out what’s happening + with fake wallet. + +6. (Optional) Install other + [Solana wallets](https://play.google.com/store/search?q=solana%20wallet&c=apps) + on the Google Play store. + +Lastly, we recommend installing _java version 11_ to avoid dependency errors. To +know what version you have installed, run `java --version` in your terminal. + +### 1. Plan out the App Structure Before we do any coding, let's conceptualize the outline of the app. Again, this app will connect to and interact with the counter program we've already deployed @@ -437,7 +438,7 @@ to Devnet. To do this, we'll need the following: There will be more files and considerations, but these are the most important files we'll be creating and working with. -#### 2. Create the App +### 2. Create the App Now that we've got some of the basic setup and structure down, let's scaffold a new app with the following command: @@ -457,35 +458,36 @@ npm run android ``` This should open and run the app in your Android emulator. If you run into -problems, check to make sure you've accomplished everything in the -[prerequisites section](#0-prerequisites). +problems, check to make sure you’ve accomplished everything in the +[_Getting Started_](#getting-started) section. -#### 3. Install Dependencies +### 3. Install Dependencies -We'll need to add in our Solana dependencies. +We will need to import our Solana dependencies. 
[The Solana Mobile docs provide a nice list of packages](https://docs.solanamobile.com/react-native/setup)
and explanations for why we need them:

- `@solana-mobile/mobile-wallet-adapter-protocol`: A React Native/Javascript API
  enabling interaction with MWA-compatible wallets
- `@solana-mobile/mobile-wallet-adapter-protocol-web3js`: A convenience wrapper
-  to use common primitives
-  from [@solana/web3.js](https://github.com/solana-labs/solana-web3.js), such
-  as `Transaction` and `Uint8Array`
+  to use common primitives from
+  [@solana/web3.js](https://github.com/solana-labs/solana-web3.js), such as
+  `Transaction` and `Uint8Array`
- `@solana/web3.js`: Solana Web Library for interacting with the Solana network
-  through the [JSON RPC API](/docs/rpc/http/index.mdx)
-- `react-native-get-random-values` Secure random number generator polyfill
-  for `web3.js` underlying Crypto library on React Native
-- `buffer`: Buffer polyfill; also needed for `web3.js` on React Native
+  through the
+  [JSON RPC API](https://github.com/solana-foundation/developer-content/blob/main/docs/rpc/http/index.mdx)
+- `react-native-get-random-values`: Secure random number generator polyfill
+  for `web3.js` underlying Crypto library on React Native
+- `buffer`: Buffer polyfill; also needed for `web3.js` on React Native

-In addition to this list, we'll add two more packages:
+In addition to this list, we will add three more packages:

- `@coral-xyz/anchor`: The Anchor TS client.
- `assert`: A polyfill that lets Anchor do its thing.
- `text-encoding-polyfill`: A polyfill needed to create the `Program` object

-If you're not familiar: polyfills actively replace Node-native libraries to make
-them work anywhere Node is not running. We'll finish our polyfill setup shortly.
+If you’re not familiar: polyfills provide Node-native libraries to make them
+work anywhere Node is not running. We will finish our polyfill setup shortly.
For now, install dependencies using the following command: ```bash @@ -500,7 +502,7 @@ npm install \ text-encoding-polyfill ``` -#### 4. Create ConnectionProvider.tsx +### 4. Create ConnectionProvider.tsx file Let's start adding our Solana functionality. Create a new folder called `components` and within it, a file called `ConnectionProvider.tsx`. This @@ -526,12 +528,11 @@ const ConnectionContext = createContext( {} as ConnectionContextState, ); -export function ConnectionProvider(props: ConnectionProviderProps) { - const { - children, - endpoint, - config = { commitment: "confirmed" }, - } = { ...props }; +export function ConnectionProvider({ + children, + endpoint, + config = { commitment: "confirmed" }, +}: ConnectionProviderProps) { const connection = useMemo( () => new Connection(endpoint, config), [config, endpoint], @@ -548,11 +549,11 @@ export const useConnection = (): ConnectionContextState => useContext(ConnectionContext); ``` -#### 5. Create AuthProvider.tsx +### 5. Create AuthProvider.tsx file -The next Solana provision we'll need is the auth provider. This is one of the -main differences between mobile and web development. What we're implementing -here is roughly equivalent to the `WalletProvider` that we're used to in web +The next Solana provision we will need is the **auth provider**. This is one of +the main differences between mobile and web development. What we’re implementing +here is roughly equivalent to the `WalletProvider` that we’re used to in web apps. However, since we're using Android and its natively installed wallets, the flow to connect and utilize them is a bit different. Most notably, we need to follow the MWA protocol. @@ -570,20 +571,20 @@ We do this by providing the following in our `AuthProvider`: - `deauthorizeSession(wallet)`: Deauthorizes the `wallet`. - `onChangeAccount`: Acts as a handler when `selectedAccount` is changed. 
-We're also going to throw in some utility methods: +We are also going to throw in some utility methods: - `getPublicKeyFromAddress(base64Address)`: Creates a new Public Key object from the Base64 address given from the `wallet` object - `getAuthorizationFromAuthResult`: Handles the authorization result, extracts relevant data from the result, and returns the `Authorization` context object -We'll expose all of this through a `useAuthorization` hook. +We will expose all of this through a `useAuthorization` hook. -Since this provider is the same across virtually all apps, we're going to give -you the full implementation that you can copy/paste. We'll dig into the details -of MWA in a future lesson. +Since this provider is the same across all apps, we are going to give you the +full implementation that you can copy and paste. We will dig into the details of +MWA in a future lesson. -Create the file `AuthProvider.tsx` in the `components` and paste in the +Create the file `AuthProvider.tsx` in the `components` folder and paste in the following: ```tsx @@ -601,25 +602,19 @@ import { toUint8Array } from "js-base64"; import { useState, useCallback, useMemo, ReactNode } from "react"; import React from "react"; -export const AuthUtils = { +const AuthUtils = { getAuthorizationFromAuthResult: ( authResult: AuthorizationResult, previousAccount?: Account, ): Authorization => { - let selectedAccount: Account; - if ( - //no wallet selected yet - previousAccount === null || - //the selected wallet is no longer authorized + const selectedAccount = + previousAccount === undefined || !authResult.accounts.some( ({ address }) => address === previousAccount.address, ) - ) { - const firstAccount = authResult.accounts[0]; - selectedAccount = AuthUtils.getAccountFromAuthorizedAccount(firstAccount); - } else { - selectedAccount = previousAccount; - } + ? 
AuthUtils.getAccountFromAuthorizedAccount(authResult.accounts[0]) + : previousAccount; + return { accounts: authResult.accounts.map( AuthUtils.getAccountFromAuthorizedAccount, @@ -631,19 +626,13 @@ export const AuthUtils = { getAccountFromAuthorizedAccount: ( authAccount: AuthorizedAccount, - ): Account => { - return { - ...authAccount, - publicKey: AuthUtils.getPublicKeyFromAddress(authAccount.address), - }; - }, - - getPublicKeyFromAddress: (address: Base64EncodedAddress) => { - return new PublicKey(toUint8Array(address)); - }, + ): Account => ({ + ...authAccount, + publicKey: new PublicKey(toUint8Array(authAccount.address)), + }), }; -export type Account = Readonly<{ +type Account = Readonly<{ address: Base64EncodedAddress; label?: string; publicKey: PublicKey; @@ -655,11 +644,11 @@ type Authorization = Readonly<{ selectedAccount: Account; }>; -export const AppIdentity = { +const APP_IDENTITY = { name: "Solana Counter Incrementor", }; -export type AuthorizationProviderContext = { +type AuthorizationProviderContext = { accounts: Account[] | null; authorizeSession: (wallet: AuthorizeAPI & ReauthorizeAPI) => Promise; deauthorizeSession: (wallet: DeauthorizeAPI) => void; @@ -669,25 +658,24 @@ export type AuthorizationProviderContext = { const AuthorizationContext = React.createContext({ accounts: null, - authorizeSession: (_wallet: AuthorizeAPI & ReauthorizeAPI) => { + authorizeSession: () => { throw new Error("Provider not initialized"); }, - deauthorizeSession: (_wallet: DeauthorizeAPI) => { + deauthorizeSession: () => { throw new Error("Provider not initialized"); }, - onChangeAccount: (_nextSelectedAccount: Account) => { + onChangeAccount: () => { throw new Error("Provider not initialized"); }, selectedAccount: null, }); -export type AuthProviderProps = { +type AuthProviderProps = { children: ReactNode; cluster: Cluster; }; -export function AuthorizationProvider(props: AuthProviderProps) { - const { children, cluster } = { ...props }; +function 
AuthorizationProvider({ children, cluster }: AuthProviderProps) { const [authorization, setAuthorization] = useState( null, ); @@ -699,55 +687,47 @@ export function AuthorizationProvider(props: AuthProviderProps) { authorization?.selectedAccount, ); setAuthorization(nextAuthorization); - return nextAuthorization; }, - [authorization, setAuthorization], + [authorization], ); const authorizeSession = useCallback( async (wallet: AuthorizeAPI & ReauthorizeAPI) => { - const authorizationResult = await (authorization - ? wallet.reauthorize({ + const authorizationResult = authorization + ? await wallet.reauthorize({ auth_token: authorization.authToken, - identity: AppIdentity, + identity: APP_IDENTITY, }) - : wallet.authorize({ cluster, identity: AppIdentity })); + : await wallet.authorize({ cluster, identity: APP_IDENTITY }); return (await handleAuthorizationResult(authorizationResult)) .selectedAccount; }, - [authorization, handleAuthorizationResult], + [authorization, cluster, handleAuthorizationResult], ); const deauthorizeSession = useCallback( async (wallet: DeauthorizeAPI) => { - if (authorization?.authToken === null) { - return; + if (authorization?.authToken) { + await wallet.deauthorize({ auth_token: authorization.authToken }); + setAuthorization(null); } - - await wallet.deauthorize({ auth_token: authorization.authToken }); - setAuthorization(null); }, - [authorization, setAuthorization], + [authorization], ); - const onChangeAccount = useCallback( - (nextAccount: Account) => { - setAuthorization(currentAuthorization => { - if ( - //check if the account is no longer authorized - !currentAuthorization?.accounts.some( - ({ address }) => address === nextAccount.address, - ) - ) { - throw new Error(`${nextAccount.address} is no longer authorized`); - } - + const onChangeAccount = useCallback((nextAccount: Account) => { + setAuthorization(currentAuthorization => { + if ( + currentAuthorization?.accounts.some( + ({ address }) => address === nextAccount.address, + ) + 
) { return { ...currentAuthorization, selectedAccount: nextAccount }; - }); - }, - [setAuthorization], - ); + } + throw new Error(`${nextAccount.address} is no longer authorized`); + }); + }, []); const value = useMemo( () => ({ @@ -767,21 +747,28 @@ export function AuthorizationProvider(props: AuthProviderProps) { ); } -export const useAuthorization = () => React.useContext(AuthorizationContext); +const useAuthorization = () => React.useContext(AuthorizationContext); + +export { + AuthorizationProvider, + useAuthorization, + type Account, + type AuthProviderProps, + type AuthorizationProviderContext, +}; ``` -#### 6. Create ProgramProvider.tsx +### 6. Create ProgramProvider.tsx file The last provider we need is our program provider. This will expose the counter program we want to interact with. -Since we're using the Anchor TS client to interact with our program, we need the -program's IDL. Start by creating a root-level folder called `models`, then -create a new file `anchor-counter.ts`. Paste the contents of the -[Anchor Counter IDL](/public/assets/courses/unboxed/counter-rn-idl.ts) into this -new file. +Since we are using the Anchor TS client to interact with our program, we need +the program's IDL. Start by creating a root-level folder called `models`, then +create a new file `anchor-counter.ts`. Paste the contents of the Anchor Counter +IDL into this new file. -Next, create the file `ProgramProvider.tsx` inside of `components`. Inside we'll +Next, create the file `ProgramProvider.tsx` inside of components. 
Inside we will create the program provider to surface our program and the counter PDA: ```tsx @@ -820,8 +807,7 @@ export type ProgramProviderProps = { children: ReactNode; }; -export function ProgramProvider(props: ProgramProviderProps) { - const { children } = props; +export function ProgramProvider({ children }: ProgramProviderProps) { const { connection } = useConnection(); const [program, setProgram] = useState | null>(null); const [counterAddress, setCounterAddress] = useState(null); @@ -831,6 +817,11 @@ export function ProgramProvider(props: ProgramProviderProps) { "ALeaCzuJpZpoCgTxMjJbNjREVqSwuvYFRZUfc151AKHU", ); + // MockWallet is a placeholder wallet used for initializing the AnchorProvider. + // In a mobile app, we don't need a real wallet here because the actual signing + // will be done by the user's mobile wallet app. This mock wallet allows us to + // set up the provider without a real wallet instance. + const MockWallet = { signTransaction: () => Promise.reject(), signAllTransactions: () => Promise.reject(), @@ -875,7 +866,7 @@ export function ProgramProvider(props: ProgramProviderProps) { export const useProgram = () => useContext(ProgramContext); ``` -#### 7. Modify App.tsx +### 7. Modify App.tsx file Now that we have all our providers, let's wrap our app with them. We're going to re-write the default `App.tsx` with the following changes: @@ -907,11 +898,14 @@ export default function App() { const endpoint = clusterApiUrl(cluster); return ( + // ConnectionProvider: Manages the connection to the Solana network + // AuthorizationProvider: Handles wallet authorization + // ProgramProvider: Provides access to the Solana program @@ -921,7 +915,7 @@ export default function App() { } ``` -#### 8. Create MainScreen.tsx +### 8. Create MainScreen.tsx file Now, let's put everything together to create our UI. Create a new folder called `screens` and a new file called `MainScreen.tsx` inside of it. In this file, we @@ -935,47 +929,45 @@ to CSS. 
In `screens/MainScreen.tsx` paste the following: ```tsx +import React from "react"; import { StatusBar, StyleSheet, View } from "react-native"; import { CounterView } from "../components/CounterView"; import { CounterButton } from "../components/CounterButton"; -import React from "react"; -const mainScreenStyles = StyleSheet.create({ +export function MainScreen() { + return ( + + + + + + + + + + ); +} + +const styles = StyleSheet.create({ container: { height: "100%", width: "100%", backgroundColor: "lightgray", }, - - incrementButtonContainer: { position: "absolute", right: "5%", bottom: "3%" }, + incrementButtonContainer: { + position: "absolute", + right: "5%", + bottom: "3%", + }, counterContainer: { alignContent: "center", alignItems: "center", justifyContent: "center", }, }); - -export function MainScreen() { - return ( - - - - - - - - - - ); -} ``` -#### 9. Create CounterView.tsx +### 9. Create CounterView.tsx file The `CounterView` is the first of our two program-specific files. `CounterView`'s only job is to fetch and listen for updates on our `Counter` @@ -1045,7 +1037,7 @@ export function CounterView() { } ``` -#### 10. Create CounterButton.tsx +### 10. Create CounterButton.tsx file Finally, we have our last component, the `CounterButton`. This floating action button will do the following in a new function `incrementCounter`: @@ -1181,7 +1173,7 @@ export function CounterButton() { } ``` -#### 11. Build and Run +### 11. Build and Run Now it's time to test that everything works! 
Build and run with the following command: @@ -1200,7 +1192,7 @@ test your app: If you run into problems, here are some examples of what they could be and how to fix them: -- Application does not build → Exit Metro with ctrl+c and try again +- Application does not build → Exit Metro with _Ctrl+C_ and try again - Nothing happens when you press the `CounterButton` → Make sure you have Solana wallet installed ( like the fake wallet we installed in Prerequisites ) - You get stuck in a forever loop while calling `increment` → This is likely due @@ -1208,22 +1200,26 @@ to fix them: `CounterButton` and manually send some Devnet sol to your wallet's address (printed in the console) -That's it! You've made your first Solana Mobile dApp. If you get stuck, feel -free to check out the -[full solution code](https://github.com/Unboxed-Software/solana-react-native-counter) +That's it! You've made your first Solana Mobile app. If you get stuck, feel free +to check out the + +[full solution code](https://github.com/solana-developers/react-native-counter) on the `main` branch of the repository. ## Challenge -Your challenge today is to take our app and add a decrement function. Simply add -another button and call the `decrement` function on our program. This -instruction already exists on the program and its IDL, so you simply need to -write client code to call it. +Your next challenge is to expand the app by adding a `decrement` function. You +need to create another button that will call the `decrement` method on the +Solana program. The logic for the decrement function already exists in the +program’s **IDL** (**Interface Description Language**), so your task is to write +the client-side code that interacts with it. 
+ +Once you've completed this, you can check your solution against the solution +code available on the -After you give it a try on your own, feel free to take a look at the -[solution code on the `solution` branch](https://github.com/Unboxed-Software/solana-react-native-counter/tree/solution). +[solution branch](https://github.com/solana-developers/react-native-counter). - -Push your code to GitHub and -[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=c15928ce-8302-4437-9b1b-9aa1d65af864)! + +If you’ve successfully completed the lab, push your code to GitHub and share +your feedback on this lesson through this [form](https://form.typeform.com/to/IPH0UGz7#answers-lesson=c15928ce-8302-4437-9b1b-9aa1d65af864) diff --git a/content/courses/mobile/metadata.yml b/content/courses/mobile/metadata.yml index f0dff73dd..0d22afc5b 100644 --- a/content/courses/mobile/metadata.yml +++ b/content/courses/mobile/metadata.yml @@ -7,7 +7,3 @@ lessons: - mwa-deep-dive - solana-mobile-dapps-with-expo priority: 99 -# Uses deprecated @metaplex-foundation/js library for NFTs -# which also uses old RPC methods and no longer functions. -# TODO: Superteam to update -isHidden: true diff --git a/content/courses/mobile/mwa-deep-dive.md b/content/courses/mobile/mwa-deep-dive.md index 08d232afe..0ceaa2fd7 100644 --- a/content/courses/mobile/mwa-deep-dive.md +++ b/content/courses/mobile/mwa-deep-dive.md @@ -23,10 +23,10 @@ description: ## Lesson Wallets exist to protect your secret keys. While some applications might have -app-specific keys, many crypto use cases rely on a single identity used across -multiple apps. In these cases, you very much want to be careful about how you -expose signing across these apps. You don't want to share your secret key with -all of them, which means you need a standard for allowing apps to submit +app-specific keys, many blockchain use cases rely on a single identity used +across multiple apps. 
In these cases, you very much want to be careful about how +you expose signing across these apps. You don't want to share your secret key +with all of them, which means you need a standard for allowing apps to submit transactions for signature to a secure wallet app that holds your secret key. This is where the Mobile Wallet Adapter (MWA) comes in. It's the transport layer to connect your mobile dApps to your wallet. @@ -41,7 +41,7 @@ app-wallet connection differently. At its core, a wallet app is fairly straightforward. It's a secure wrapper around your keypair. External applications can request that the wallet sign -transactions without ever having access to your private key. Both the web and +transactions without ever having access to your secret key. Both the web and mobile wallet adapters define this interaction for their respective platforms. #### How does a web wallet work? @@ -198,7 +198,7 @@ transact(async (wallet: Web3MobileWallet) => { Note that the above example does not handle errors or user rejections. In production, it's a good idea to wrap the authorization state and methods with a custom `useAuthorization` hook. For reference, we built this -[in the previous lesson](/content/courses/mobile/intro-to-solana-mobile). +[in the previous lesson](/content/courses/mobile/intro-to-solana-mobile.md). #### Interact with a wallet @@ -450,7 +450,7 @@ app-wallet relationship. Before we start programming our wallet, we need to do some setup. You will need a React Native development environment and a Solana dApp to test on. If you have completed the -[Introduction to Solana Mobile lab](/content/courses/mobile/intro-to-solana-mobile), +[Introduction to Solana Mobile lab](/content/courses/mobile/intro-to-solana-mobile.md), both of these requirements should be met and the counter app installed on your Android device/emulator. @@ -598,8 +598,8 @@ provider should generate and store a keypair. 
The `WalletProvider` will then return its context including the `wallet` and `connection`. The rest of the app can access this context using the `useWallet()` hook. -**_AGAIN_**, async storage is not fit to store private keys in production. -Please use something like +**_AGAIN_**, async storage is not fit to store secret keys in production. Please +use something like [Android's keystore system](https://developer.android.com/privacy-and-security/keystore). Let's create the `WalletProvider.tsx` within a new directory named `components`: @@ -1625,7 +1625,7 @@ request types: `SignMessagesRequest` and `SignTransactionsRequest`. Try to do this without help as it's great practice, but if you get stuck, check out the -[solution code on the `solution` branch](https://github.com/solana-developers/react-native-fake-solana-wallet/tree/solution). +[solution code on the repo](https://github.com/solana-developers/react-native-fake-solana-wallet). Push your code to GitHub and diff --git a/content/courses/mobile/solana-mobile-dapps-with-expo.md b/content/courses/mobile/solana-mobile-dapps-with-expo.md index 16ee816f3..27006dd07 100644 --- a/content/courses/mobile/solana-mobile-dapps-with-expo.md +++ b/content/courses/mobile/solana-mobile-dapps-with-expo.md @@ -11,10 +11,10 @@ description: "How to use Solana in your Expo apps." - Expo is an open-source collection of tools and libraries that wrap around React Native, much like Next.js is a framework built on top of React. -- In addition to simplifying the build/deploy process, Expo provides packages - that give you access to mobile devices' peripherals and capabilities. -- A lot of Solana ecosystem libraries don't support React native out of the box, - but you can typically use them with the right +- Along with simplifying the build and deploy process, Expo offers packages that + allow access to mobile device peripherals and capabilities. 
+- Many Solana ecosystem libraries don't natively support React Native, but you + can often use them with the appropriate [polyfills](https://developer.mozilla.org/en-US/docs/Glossary/Polyfill). ## Lesson @@ -37,8 +37,9 @@ lesson will be spent in the lab. ### React Native Expo -Expo is an open-source collection of tools and libraries that wrap around React -Native, much like Next.js is a framework built on top of React. +Expo is an open-source platform for making universal native apps for Android, +iOS, and the web that wraps around React Native, much like Next.js is a +framework built on top of React. Expo consists of three main parts: @@ -46,15 +47,15 @@ Expo consists of three main parts: 2. The Expo Go App 3. A suite of libraries that grant access to various mobile device capabilities. -The Expo CLI is a build and debugging tool that helps make all of the magic -happen. Chances are, you'll only have to interact with it when you're building -or starting a development server. It just works. +The Expo CLI is a powerful tool for building and debugging that simplifies the +development process. Chances are, you'll only have to interact with it when +you're building or starting a development server. It just works. The [Expo Go App](https://expo.dev/client) is a really cool piece of tech that allows _most_ apps to be developed without using an emulator or physical device. You download the app, you scan the QR from the build output and then you have a -working dev environment right on your phone. Unfortunately, this will not work -with the Solana mobile SDK. Coming from the +working dev environment right on your phone. However, this doesn't work with the +Solana Mobile SDK. Coming from the [Solana Expo setup article](https://docs.solanamobile.com/react-native/expo): > The traditional Expo Go development flow is only limited to certain @@ -64,21 +65,22 @@ with the Solana mobile SDK. Coming from the > fully compatible with Expo. 
Lastly, and most importantly, Expo does an amazing job providing -[easy-to-use libraries](https://docs.expo.dev/versions/latest/) that give you +[comprehensive libraries](https://docs.expo.dev/versions/latest/) that give you access to the device's onboard peripherals, such as camera, battery, and speakers. The libraries are intuitive and the documentation is phenomenal. #### How to create an Expo app -To get started with Expo, you first need the prerequisite setup described in the -[Introduction to Solana Mobile lesson](/content/courses/mobile/intro-to-solana-mobile). +To begin using Expo, first follow the setup instructions described in the +Getting Started section of the +[Introduction to Solana Mobile lesson](/content/courses/mobile/intro-to-solana-mobile.md). After that, you'll want to sign up for an -[Expo Application Services (EAS) account](https://expo.dev/). +[Expo Application Services (EAS) account](https://expo.dev/eas). Once you have an EAS account, you can install the EAS CLI and log in: ```bash -npm install --global eas-cli +npm install -g eas-cli eas login ``` @@ -102,7 +104,7 @@ the following inside this file: ```json { "cli": { - "version": ">= 5.2.0" + "version": ">= 5.12.0" }, "build": { "development": { @@ -120,13 +122,14 @@ the following inside this file: } ``` -With the EAS config file created, you can build using the -`npx eas build --local` command plus relevant flags for any additional -requirements. For example, the following will build the project locally with a -development profile specifically for Android: +With the EAS configuration file in place, you can build your project using +`eas build`. This submits a job to the EAS Build service, where your APK is +built using Expo's cloud infrastructure. If you want to build locally, you can +add the `--local` flag. 
For example, the following command builds the project +locally with a development profile specifically for Android: ```bash -npx eas build --profile development --platform android --local +eas build --profile development --platform android --message "Developing on Android!" --local ``` You then need to install the output APK to your device or emulator. If you're @@ -168,8 +171,12 @@ JS/TS. import { Pedometer } from "expo-sensors"; ``` -Depending on the package, there may be additional setup required. Be sure to -read the [docs](https://docs.expo.dev/versions/latest/) when working with a new +Depending on the package, there may be additional setup required. For example, +if you're using the `expo-camera` package, you not only need to install the +package but also configure the appropriate permissions in your `app.json` or +`AndroidManifest.xml` file for Android and request runtime permissions for +accessing the camera. Be sure to read the +[Expo docs](https://docs.expo.dev/versions/latest/) when working with a new package. ### Integrate ecosystem libraries into your Expo app @@ -204,8 +211,10 @@ For a Solana + Expo app, you'll need the following: as `Transaction` and `Uint8Array`. - `@solana/web3.js`: Solana Web Library for interacting with the Solana network through the [JSON RPC API](/docs/rpc/http/index.mdx). -- `react-native-get-random-values`: Secure random number generator polyfill - for `web3.js` underlying Crypto library on React Native. +- `expo-crypto` is a secure random number generator polyfill used in React + Native for web3.js's underlying Crypto library. This feature is supported only + in Expo SDK version 49+ and requires Expo Router. Make sure your setup is + updated to meet these requirements. - `buffer`: Buffer polyfill needed for `web3.js` on React Native. 
#### Metaplex Polyfills @@ -213,37 +222,32 @@ For a Solana + Expo app, you'll need the following: If you want to use the Metaplex SDK, you'll need to add the Metaplex library plus a few additional polyfills: -- `@metaplex-foundation/js@0.19.4` - Metaplex Library +- `@metaplex-foundation/umi` `@metaplex-foundation/umi-bundle-defaults` + `@metaplex-foundation/mpl-core` - Metaplex Library - Several more polyfills - `assert` - - `util` - `crypto-browserify` - - `stream-browserify` - `readable-stream` - - `browserify-zlib` - - `path-browserify` - - `react-native-url-polyfill` - -All of the libraries that the above polyfills are meant to replace are utilized -by the Metaplex library in the background. It's unlikely you'll be importing any -of them into your code directly. Because of this, you'll need to register the -polyfills using a `metro.config.js` file. This will ensure that Metaplex uses -the polyfills instead of the usual Node.js libraries that aren't supported in -React Native. Below is an example `metro.config.js` file: + - `zlib` + - `react-native-url-polyfill` All of the libraries that the above polyfills + are meant to replace are utilized by the Metaplex libraries in the + background. It's unlikely you'll be importing any of them into your code + directly. Because of this, you'll need to register the polyfills using a + `metro.config.js` file. This will ensure that Metaplex uses the polyfills + instead of the usual Node.js libraries that aren't supported in React + Native. 
Below is an example `metro.config.js` file: ```js -const { getDefaultConfig } = require("@expo/metro-config"); -const defaultConfig = getDefaultConfig(__dirname); - -defaultConfig.resolver.extraNodeModules = { - crypto: require.resolve("crypto-browserify"), - stream: require.resolve("readable-stream"), - url: require.resolve("react-native-url-polyfill"), - zlib: require.resolve("browserify-zlib"), - path: require.resolve("path-browserify"), -}; +// Learn more https://docs.expo.io/guides/customizing-metro +const { getDefaultConfig } = require("expo/metro-config"); + +/** @type {import('expo/metro-config').MetroConfig} */ +const config = getDefaultConfig(__dirname); -module.exports = defaultConfig; +// Add polyfill resolvers +config.resolver.extraNodeModules.crypto = require.resolve("expo-crypto"); + +module.exports = config; ``` ### Putting it all together @@ -260,9 +264,11 @@ Let's practice this together by building the Mint-A-Day app, where users will able to mint a single NFT snapshot of their lives daily, creating a permanent diary of sorts. -To mint the NFTs we'll be using Metaplex's Javascript SDK along with -[nft.storage](https://nft.storage/) to store images and metadata. All of our -onchain work will be on Devnet. +To mint the NFTs we'll be using Metaplex's Umi libraries along with +[Pinata Cloud](https://pinata.cloud/) to store images and metadata. We are using +Pinata in this tutorial, but +[there are many good solutions for long-term image storage](https://solana.com/developers/guides/getstarted/how-to-create-a-token#create-and-upload-image-and-offchain-metadata). +All of our onchain work will be on Devnet. The first half of this lab is cobbling together the needed components to make Expo, Solana, and Metaplex all work together. We'll do this modularly so you'll @@ -293,12 +299,12 @@ it to run. We use 5GB of ram on our side. To simplify the Expo process, you'll want an Expo Application Services (EAS) account. 
This will help you build and run the application. -First sign up for an [EAS account](https://expo.dev/). +First sign up for an [EAS account](https://expo.dev/eas). Then, install the EAS CLI and log in: ```bash -npm install --global eas-cli +npm install -g eas-cli eas login ``` @@ -307,13 +313,13 @@ eas login Let's create our app with the following: ```bash -npx create-expo-app -t expo-template-blank-typescript solana-expo +npx create-expo-app --template blank-typescript solana-expo cd solana-expo +npx expo install expo-dev-client # This installs a library that enables the creation of custom development builds, providing useful tools for debugging and testing. While optional, it is recommended for a smoother development experience. ``` This uses `create-expo-app` to generate a new scaffold for us based on the -`expo-template-blank-typescript` template. This is just an empty Typescript -React Native app. +`blank-typescript` template. A Blank template with TypeScript enabled. #### 3. Local build config @@ -331,7 +337,7 @@ Copy and paste the following into the newly created `eas.json`: ```json { "cli": { - "version": ">= 5.2.0" + "version": ">= 3.12.0" }, "build": { "development": { @@ -351,8 +357,8 @@ Copy and paste the following into the newly created `eas.json`: #### 4. Build and emulate -Now let's build the project. You will choose `y` for every answer. This will -take a while to complete. +Now let's build the project locally. You will choose `y` for every answer. This +will take a while to complete. ```bash npx eas build --profile development --platform android --local @@ -385,17 +391,16 @@ already have a Devnet-enabled wallet installed you can skip step 0. #### 0. Install a Devnet-enabled Solana wallet -You'll need a wallet that supports Devnet to test with. In -[our Mobile Wallet Adapter lesson](/content/courses/mobile/mwa-deep-dive) we -created one of these. 
Let's install it from the solution branch in a different -directory from our app: +You'll need a wallet that supports Devnet to test with. In our +[Mobile Wallet Adapter lesson](/content/courses/mobile/mwa-deep-dive.md) we +created one of these. Let's install it from the repo in a different directory +from our app: ```bash cd .. -git clone https://github.com/Unboxed-Software/react-native-fake-solana-wallet +git clone https://github.com/solana-developers/react-native-fake-solana-wallet cd react-native-fake-solana-wallet -git checkout solution -npm run install +yarn ``` The wallet should be installed on your emulator or device. Make sure to open the @@ -416,11 +421,11 @@ all Solana mobile apps. This will include some polyfills that allow otherwise incompatible packages to work with React native: ```bash -npm install \ +yarn add \ @solana/web3.js \ @solana-mobile/mobile-wallet-adapter-protocol-web3js \ @solana-mobile/mobile-wallet-adapter-protocol \ - react-native-get-random-values \ + expo-crypto \ buffer ``` @@ -432,17 +437,18 @@ Solana-based apps. Create two new folders: `components` and `screens`. We are going to use some boilerplate code from the -[first Mobile lesson](/content/courses/mobile/basic-solana-mobile). We will be -copying over `components/AuthProvider.tsx` and +[first Mobile lesson](/content/courses/mobile/intro-to-solana-mobile.md). We +will be copying over `components/AuthorizationProvider.tsx` and `components/ConnectionProvider.tsx`. These files provide us with a `Connection` object as well as some helper functions that authorize our dapp. 
-Create file `components/AuthProvider.tsx` and copy the contents -[of our existing Auth Provider from Github](https://raw.githubusercontent.com/Unboxed-Software/solana-advance-mobile/main/components/AuthProvider.tsx) +Create file `components/AuthorizationProvider.tsx` and copy the contents of +[our existing Auth Provider from Github](https://raw.githubusercontent.com/solana-developers/mobile-apps-with-expo/main/components/AuthProvider.tsx) into the new file. Secondly, create file `components/ConnectionProvider.tsx` and copy the contents -[of our existing Connection Provider from Github](https://raw.githubusercontent.com/Unboxed-Software/solana-advance-mobile/main/components/ConnectionProvider.tsx) +of +[our existing Connection Provider from Github](https://raw.githubusercontent.com/solana-developers/mobile-apps-with-expo/main/components/ConnectionProvider.tsx) into the new file. Now let's create a boilerplate for our main screen in `screens/MainScreen.tsx`: @@ -460,18 +466,46 @@ export function MainScreen() { } ``` +Next, create file called `polyfills.ts` for react-native to work with all the +solana dependencies + +```typescript filename="polyfills.ts" +import { getRandomValues as expoCryptoGetRandomValues } from "expo-crypto"; +import { Buffer } from "buffer"; + +// Set global Buffer +global.Buffer = Buffer; + +// Define Crypto class with getRandomValues method +class Crypto { + getRandomValues = expoCryptoGetRandomValues; +} + +// Check if crypto is already defined in the global scope +const hasInbuiltWebCrypto = typeof window.crypto !== "undefined"; + +// Use existing crypto if available, otherwise create a new Crypto instance +const webCrypto = hasInbuiltWebCrypto ? 
window.crypto : new Crypto(); + +// Polyfill crypto object if it's not already defined +if (!hasInbuiltWebCrypto) { + Object.defineProperty(window, "crypto", { + configurable: true, + enumerable: true, + get: () => webCrypto, + }); +} +``` + Finally, let's change `App.tsx` to wrap our application in the two providers we just created: ```tsx -import "react-native-get-random-values"; -import { StatusBar } from "expo-status-bar"; -import { StyleSheet, Text, View } from "react-native"; import { ConnectionProvider } from "./components/ConnectionProvider"; -import { AuthorizationProvider } from "./components/AuthProvider"; +import { AuthorizationProvider } from "./components/AuthorizationProvider"; import { clusterApiUrl } from "@solana/web3.js"; import { MainScreen } from "./screens/MainScreen"; -global.Buffer = require("buffer").Buffer; +import "./polyfills"; export default function App() { const cluster = "devnet"; @@ -491,22 +525,36 @@ export default function App() { } ``` -Notice we've added two polyfills above: `buffer` and -`react-native-get-random-values`. These are necessary for the Solana -dependencies to run correctly. +Notice we've added the polyfills file `polyfills.ts`. These are necessary for +the Solana dependencies to run correctly. #### 4. Build and run Solana boilerplate +Add the following convenient run scripts to your `package.json` file. + +```json + "scripts": { + "start": "expo start --dev-client", + "android": "expo start --android", + "ios": "expo start --ios", + "web": "expo start --web", + "build": "npx eas build --profile development --platform android", + "build:local": "npx eas build --profile development --platform android --local", + "test": "echo \"No tests specified\" && exit 0", + "clean": "rm -rf node_modules && yarn" + } +``` + Let's make sure everything is working and compiling correctly. In Expo, anytime you change the dependencies, you'll need to rebuild and re-install the app. 
**_Optional:_** To avoid possible build version conflicts, you may want to _uninstall_ the previous version before you drag and drop the new one in. -Build: +Build locally: ```bash -npx eas build --profile development --platform android --local +yarn run build:local ``` Install: **_Drag_** the resulting build file into your emulator. @@ -514,7 +562,7 @@ Install: **_Drag_** the resulting build file into your emulator. Run: ```bash -npx expo start --dev-client --android +yarn run android ``` Everything should compile and you should have a boilerplate Solana Expo app. @@ -528,12 +576,12 @@ you can reference. #### 1. Install Metaplex dependencies -The Metaplex SDK abstracts away a lot of the minutia of working with NFTs, -however it was written largely for Node.js, so we'll need several more polyfills -to make it work: +[Metaplex programs and tools](https://developers.metaplex.com/programs-and-tools) +abstracts away a lot of the minutia of working with NFTs, however it was written +largely for Node.js, so we'll need several more polyfills to make it work: ```bash -npm install assert \ +yarn add assert \ util \ crypto-browserify \ stream-browserify \ @@ -541,7 +589,12 @@ npm install assert \ browserify-zlib \ path-browserify \ react-native-url-polyfill \ - @metaplex-foundation/js@0.19.4 + @metaplex-foundation/umi \ + @metaplex-foundation/umi-bundle-defaults \ + @metaplex-foundation/umi-signer-wallet-adapters \ + @metaplex-foundation/umi-web3js-adapters \ + @metaplex-foundation/mpl-token-metadata \ + @metaplex-foundation/mpl-candy-machine ``` #### 2. 
Polyfill config @@ -555,122 +608,95 @@ touch metro.config.js Copy and paste the following into `metro.config.js`: -```js -// Import the default Expo Metro config -const { getDefaultConfig } = require("@expo/metro-config"); - -// Get the default Expo Metro configuration -const defaultConfig = getDefaultConfig(__dirname); - -// Customize the configuration to include your extra node modules -defaultConfig.resolver.extraNodeModules = { - crypto: require.resolve("crypto-browserify"), - stream: require.resolve("readable-stream"), - url: require.resolve("react-native-url-polyfill"), - zlib: require.resolve("browserify-zlib"), - path: require.resolve("path-browserify"), -}; +```javascript +// Learn more https://docs.expo.io/guides/customizing-metro +const { getDefaultConfig } = require("expo/metro-config"); -// Export the modified configuration -module.exports = defaultConfig; +/** @type {import('expo/metro-config').MetroConfig} */ +const config = getDefaultConfig(__dirname); + +// Add polyfill resolvers +config.resolver.extraNodeModules.crypto = require.resolve("expo-crypto"); + +module.exports = config; ``` #### 3. Metaplex provider -We're going to create a Metaplex provider file that will help us access a -`Metaplex` object. This `Metaplex` object is what gives us access to all of the -functions we'll need like `fetch` and `create`. To do this we create a new file -`/components/MetaplexProvider.tsx`. Here we pipe our mobile wallet adapter into -an `IdentitySigner` for the `Metaplex` object to use. This allows it to call -several privileged functions on our behalf: +We'll be creating NFTs using +[Metaplex's MPL Token Metadata library](https://developers.metaplex.com/token-metadata), +leveraging the `Umi` object, a tool commonly used in many Metaplex applications. +This combination will give us access to key functions like `fetch` and `create` +that are essential for NFT creation. 
To set this up, we will create a new file, +`/components/UmiProvider.tsx`, where we'll connect our mobile wallet adapter to +the `Umi` object. This allows us to execute privileged actions, such as +interacting with token metadata, on our behalf. ```tsx +import { createContext, ReactNode, useContext } from "react"; +import type { Umi } from "@metaplex-foundation/umi"; import { - IdentitySigner, - Metaplex, - MetaplexPlugin, -} from "@metaplex-foundation/js"; -import { - transact, - Web3MobileWallet, -} from "@solana-mobile/mobile-wallet-adapter-protocol-web3js"; -import { Connection, Transaction } from "@solana/web3.js"; -import { useMemo } from "react"; -import { Account } from "./AuthProvider"; - -export const mobileWalletAdapterIdentity = ( - mwaIdentitySigner: IdentitySigner, -): MetaplexPlugin => ({ - install(metaplex: Metaplex) { - metaplex.identity().setDriver(mwaIdentitySigner); - }, -}); - -export const useMetaplex = ( - connection: Connection, - selectedAccount: Account | null, - authorizeSession: (wallet: Web3MobileWallet) => Promise, -) => { - return useMemo(() => { - if (!selectedAccount || !authorizeSession) { - return { mwaIdentitySigner: null, metaplex: null }; - } - - const mwaIdentitySigner: IdentitySigner = { - publicKey: selectedAccount.publicKey, - signMessage: async (message: Uint8Array): Promise => { - return await transact(async (wallet: Web3MobileWallet) => { - await authorizeSession(wallet); - - const signedMessages = await wallet.signMessages({ - addresses: [selectedAccount.publicKey.toBase58()], - payloads: [message], - }); - - return signedMessages[0]; - }); - }, - signTransaction: async ( - transaction: Transaction, - ): Promise => { - return await transact(async (wallet: Web3MobileWallet) => { - await authorizeSession(wallet); + createNoopSigner, + publicKey, + signerIdentity, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { walletAdapterIdentity } from 
"@metaplex-foundation/umi-signer-wallet-adapters"; +import { mplTokenMetadata } from "@metaplex-foundation/mpl-token-metadata"; +import { mplCandyMachine } from "@metaplex-foundation/mpl-candy-machine"; +import { useAuthorization } from "./AuthorizationProvider"; + +type UmiContext = { + umi: Umi | null; +}; - const signedTransactions = await wallet.signTransactions({ - transactions: [transaction], - }); +const DEFAULT_CONTEXT: UmiContext = { + umi: null, +}; - return signedTransactions[0]; - }); - }, - signAllTransactions: async ( - transactions: Transaction[], - ): Promise => { - return transact(async (wallet: Web3MobileWallet) => { - await authorizeSession(wallet); - const signedTransactions = await wallet.signTransactions({ - transactions: transactions, - }); - return signedTransactions; - }); - }, - }; +export const UmiContext = createContext(DEFAULT_CONTEXT); - const metaplex = Metaplex.make(connection).use( - mobileWalletAdapterIdentity(mwaIdentitySigner), +export const UmiProvider = ({ + endpoint, + children, +}: { + endpoint: string; + children: ReactNode; +}) => { + const { selectedAccount } = useAuthorization(); + const umi = createUmi(endpoint) + .use(mplTokenMetadata()) + .use(mplCandyMachine()); + if (selectedAccount === null) { + const noopSigner = createNoopSigner( + publicKey("11111111111111111111111111111111"), ); + umi.use(signerIdentity(noopSigner)); + } else { + umi.use(walletAdapterIdentity(selectedAccount)); + } - return { metaplex }; - }, [authorizeSession, selectedAccount, connection]); + return {children}; }; + +export function useUmi(): Umi { + const umi = useContext(UmiContext).umi; + if (!umi) { + throw new Error( + "Umi context was not initialized. " + + "Did you forget to wrap your app with ?", + ); + } + return umi; +} ``` #### 4. NFT Provider We're also making a higher-level NFT provider that helps with NFT state management. 
It combines all three of our previous providers: -`ConnectionProvider`, `AuthProvider`, and `MetaplexProvider` to allow us to -create our `Metaplex` object. We will fill this out at a later step; for now, it +`ConnectionProvider`, `AuthorizationProvider`, and `UmiProvider` to allow us to +create our `Umi` object. We will fill this out at a later step; for now, it makes for a good boilerplate. Let's create the new file `components/NFTProvider.tsx`: @@ -678,17 +704,22 @@ Let's create the new file `components/NFTProvider.tsx`: ```tsx import "react-native-url-polyfill/auto"; import { useConnection } from "./ConnectionProvider"; -import { Account, useAuthorization } from "./AuthProvider"; +import { Account, useAuthorization } from "./AuthorizationProvider"; import React, { ReactNode, createContext, useContext, useState } from "react"; -import { useMetaplex } from "./MetaplexProvider"; +import { useUmi } from "./UmiProvider"; +import { Umi } from "@metaplex-foundation/umi"; export interface NFTProviderProps { children: ReactNode; } -export interface NFTContextState {} +export interface NFTContextState { + umi: Umi | null; +} -const DEFAULT_NFT_CONTEXT_STATE: NFTContextState = {}; +const DEFAULT_NFT_CONTEXT_STATE: NFTContextState = { + umi: null, +}; const NFTContext = createContext(DEFAULT_NFT_CONTEXT_STATE); @@ -698,9 +729,11 @@ export function NFTProvider(props: NFTProviderProps) { const { connection } = useConnection(); const { authorizeSession } = useAuthorization(); const [account, setAccount] = useState(null); - const { metaplex } = useMetaplex(connection, account, authorizeSession); + const { umi } = useUmi(connection, account, authorizeSession); - const state = {}; + const state: NFTContextState = { + umi, + }; return {children}; } @@ -716,13 +749,12 @@ Notice we've added yet another polyfill to the top Now, let's wrap our new `NFTProvider` around `MainScreen` in `App.tsx`: ```tsx -import "react-native-get-random-values"; +import "./polyfills"; import { 
ConnectionProvider } from "./components/ConnectionProvider";
-import { AuthorizationProvider } from "./components/AuthProvider";
+import { AuthorizationProvider } from "./components/AuthorizationProvider";
 import { clusterApiUrl } from "@solana/web3.js";
 import { MainScreen } from "./screens/MainScreen";
 import { NFTProvider } from "./components/NFTProvider";
-global.Buffer = require("buffer").Buffer;
 
 export default function App() {
   const cluster = "devnet";
@@ -769,12 +801,12 @@ npx expo start --dev-client --android
 
 Everything we've done to this point is effectively boilerplate. We need to add
 the functionality we intend for our Mint-A-Day app to have. Mint-A-day is a
-daily snapshot app. It lets users take a snapshot of their life daily in the
+daily snapshot app. It allows users to take a snapshot of their life daily in the
 form of minting an NFT.
 
 The app will need access to the device's camera and a place to remotely store
 the captured images. Fortunately, Expo SDK can provide access to the camera and
-[NFT.Storage](https://nft.storage) can store your NFT files for free.
+[Pinata Cloud](https://pinata.cloud/) can store your NFT files safely.
 
 #### 1. Camera setup
 
@@ -806,31 +838,40 @@ as a plugin in `app.json`:
   }
 }
 ```
 
-This particular dependency makes it super simple to use the camera. To allow the
-user to take a picture and return the image all you have to do is call the
-following:
+This dependency makes it incredibly easy to use the camera. 
To allow the user to
+take a picture and return the image, simply call the following:
 
 ```tsx
+// Launch the camera to take a picture using ImagePicker
 const result = await ImagePicker.launchCameraAsync({
+  // Restrict media types to images only (no videos)
   mediaTypes: ImagePicker.MediaTypeOptions.Images,
+
+  // Allow the user to edit/crop the image after taking it
   allowsEditing: true,
+
+  // Specify the aspect ratio of the cropping frame (1:1 for a square)
   aspect: [1, 1],
+
+  // Set the image quality to maximum (1.0 = highest quality, 0.0 = lowest)
   quality: 1,
 });
+
+// 'result' will contain information about the captured image
+// If the user cancels, result.canceled will be true, otherwise it will contain the image URI
 ```
 
 No need to add this anywhere yet - we'll get to it in a few steps.
 
-#### 2. NFT.Storage setup
+#### 2. Pinata Cloud setup
 
 The last thing we need to do is set up our access to
-[nft.storage](https://nft.storage). We'll need to get an API key and add it as
-an environment variable, then we need to add one last dependency to convert our
-images into a file type we can upload.
+[Pinata Cloud](https://pinata.cloud/). We'll need to get an API key and add it
+as an environment variable, which we'll then use when uploading our images and
+metadata from the app.
 
-We'll be using NFT.storage to host our NFTs with IPFS since they do this for
-free. [Sign up, and create an API key](https://nft.storage/manage/). Keep this
-API key private.
+We'll be using Pinata Cloud to host our NFTs with IPFS since they do this for a
+very cheap price. Remember to keep this API key private.
 
 Best practices suggest keeping API keys in a `.env` file with `.env` added to
 your `.gitignore`. It's also a good idea to create a `.env.example` file that
@@ -840,19 +881,13 @@ for the project.
 
 Create both files, in the root of your directory and add `.env` to your
 `.gitignore` file.
 
-Then, add your API key to the `.env` file with the name -`EXPO_PUBLIC_NFT_STORAGE_API`. Now you'll be able to access your API key safely -in the application. - -Lastly, install `rn-fetch-blob`. This package will help us grab images from the -device's URI scheme and turn them into Blobs we can the upload to -[NFT.storage](https://nft.storage). - -Install it with the following: - -```bash -npm i rn-fetch-blob -``` +Next, add your API key to the `.env` file with the variable name +`EXPO_PUBLIC_NFT_PINATA_JWT`. This allows you to securely access your API key in +the application using `process.env.EXPO_PUBLIC_NFT_PINATA_JWT`, unlike +traditional `import "dotenv/config"` which may require additional polyfills when +working with Expo. For more information on securely storing secrets, refer to +the +[Expo documentation on environment variables](https://docs.expo.dev/build-reference/variables/#importing-secrets-from-a-dotenv-file) #### 3. Final build @@ -887,8 +922,8 @@ The app itself is relatively straightforward. The general flow is: 1. The user connects (authorizes) using the `transact` function and by calling `authorizeSession` inside the callback -2. Our code then uses the `Metaplex` object to fetch all of the NFTs created by - the user +2. Our code then uses the `Umi` object to fetch all of the NFTs created by the + user 3. If an NFT has not been created for the current day, allow the user to take a picture, upload it, and mint it as an NFT @@ -897,26 +932,28 @@ The app itself is relatively straightforward. The general flow is: `NFTProvider.tsx` will control the state with our custom `NFTProviderContext`. 
This should have the following fields:
 
-- `metaplex: Metaplex | null` - Holds the metaplex object that we use to call
-  `fetch` and `create`
+- `umi: Umi | null` - Holds the Umi object that we use to call `fetch` and
+  `create`
 - `publicKey: PublicKey | null` - The NFT creator's public key
 - `isLoading: boolean` - Manages loading state
-- `loadedNFTs: (Nft | Sft | SftWithToken | NftWithToken)[] | null` - An array of
-  the user's snapshot NFTs
-- `nftOfTheDay: (Nft | Sft | SftWithToken | NftWithToken) | null` - A reference
-  to the NFT created today
+- `loadedNFTs: (DigitalAsset)[] | null` - An array of the user's snapshot NFTs
+- `nftOfTheDay: (DigitalAsset) | null` - A reference to the NFT created today
 - `connect: () => void` - A function for connecting to the Devnet-enabled
   wallet
 - `fetchNFTs: () => void` - A function that fetches the user's snapshot NFTs
 - `createNFT: (name: string, description: string, fileUri: string) => void` - A
   function that creates a new snapshot NFT
 
+The `DigitalAsset` type comes from `@metaplex-foundation/mpl-token-metadata`
+which has metadata, off-chain metadata, collection data, plugins (including
+Attributes), and more.
+
 ```tsx
 export interface NFTContextState {
   metaplex: Metaplex | null; // Holds the metaplex object that we use to call `fetch` and `create` on.
publicKey: PublicKey | null; // The public key of the authorized wallet isLoading: boolean; // Loading state - loadedNFTs: (Nft | Sft | SftWithToken | NftWithToken)[] | null; // Array of loaded NFTs that contain metadata - nftOfTheDay: (Nft | Sft | SftWithToken | NftWithToken) | null; // The NFT snapshot created on the current day + loadedNFTs: DigitalAsset[] | null; // Array of loaded NFTs that contain metadata + nftOfTheDay: DigitalAsset | null; // The NFT snapshot created on the current day connect: () => void; // Connects (and authorizes) us to the Devnet-enabled wallet fetchNFTs: () => void; // Fetches the NFTs using the `metaplex` object createNFT: (name: string, description: string, fileUri: string) => void; // Creates the NFT @@ -943,201 +980,220 @@ through the code for each of them and then show you the entire file at the end: }; ``` -2. `fetchNFTs` - This function will fetch the NFTs using Metaplex: - - ```tsx - const fetchNFTs = async () => { - if (!metaplex || !account || isLoading) return; +2. `fetchNFTs` - This function will fetch the NFTs using + `fetchAllDigitalAssetByCreator`: - setIsLoading(true); - - try { - const nfts = await metaplex.nfts().findAllByCreator({ - creator: account.publicKey, - }); - - const loadedNFTs = await Promise.all( - nfts.map(nft => { - return metaplex.nfts().load({ metadata: nft as Metadata }); - }), - ); - setLoadedNFTs(loadedNFTs); - - // Check if we already took a snapshot today - const nftOfTheDayIndex = loadedNFTs.findIndex(nft => { - return formatDate(new Date(Date.now())) === nft.name; - }); - - if (nftOfTheDayIndex !== -1) { - setNftOfTheDay(loadedNFTs[nftOfTheDayIndex]); - } - } catch (error) { - console.log(error); - } finally { - setIsLoading(false); - } - }; - ``` - -3. `createNFT` - This function will upload a file to NFT.Storage, and then use - Metaplex to create and mint an NFT to your wallet. This comes in three parts, - uploading the image, uploading the metadata and then minting the NFT. 
+```tsx +const fetchNFTs = useCallback(async () => { + if (!umi || !account || isLoading) return; + setIsLoading(true); + try { + const creatorPublicKey = fromWeb3JsPublicKey(account.publicKey); + const nfts = await fetchAllDigitalAssetByCreator(umi, creatorPublicKey); + setLoadedNFTs(nfts); + } catch (error) { + console.error("Failed to fetch NFTs:", error); + } finally { + setIsLoading(false); + } +}, [umi, account, isLoading]); +``` - To upload to NFT.Storage you just make a POST with your API key and the - image/metadata as the body. +3. `createNFT` - This function will upload a file to Pinata Cloud, and then use + `createNft` function from to create and mint an NFT to your wallet. This + comes in three parts, uploading the image, uploading the metadata and then + minting the NFT. To upload to Pinata Cloud, you can use their + [HTTP API endpoint](https://docs.pinata.cloud/api-reference/endpoint/upload-a-file), + allowing interaction with their API for file uploads. We'll create two helper functions for uploading the image and metadata separately, then tie them together into a single `createNFT` function: - ```tsx - // https://nft.storage/api-docs/ - const uploadImage = async (fileUri: string): Promise => { - const imageBytesInBase64: string = await RNFetchBlob.fs.readFile( - fileUri, - "base64", - ); - const bytes = Buffer.from(imageBytesInBase64, "base64"); - - const response = await fetch("https://api.nft.storage/upload", { - method: "POST", - headers: { - Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_STORAGE_API}`, - "Content-Type": "image/jpg", - }, - body: bytes, - }); +```tsx +const ipfsPrefix = `https://${process.env.EXPO_PUBLIC_NFT_PINATA_GATEWAY_URL}/ipfs/`; +async function uploadImageFromURI(fileUri: string) { + try { + const form = new FormData(); + const randomFileName = `image_${Date.now()}_${Math.floor(Math.random() * 10000)}.jpg`; + + form.append("file", { + uri: Platform.OS === "android" ? 
fileUri : fileUri.replace("file://", ""), + type: "image/jpeg", // Adjust the type as necessary + name: randomFileName, // Adjust the name as necessary + }); - const data = await response.json(); - const cid = data.value.cid; + const options = { + method: "POST", + headers: { + Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_PINATA_JWT}`, + "Content-Type": "multipart/form-data", + }, + body: form, + }; - return cid as string; - }; + const response = await fetch( + "https://api.pinata.cloud/pinning/pinFileToIPFS", + options, + ); + const responseJson = await response.json(); + return responseJson; + } catch (error) { + console.error("Upload failed:", error); + } finally { + console.log("Upload process completed."); + } +} - const uploadMetadata = async ( - name: string, - description: string, - imageCID: string, - ): Promise => { - const response = await fetch("https://api.nft.storage/upload", { - method: "POST", - headers: { - Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_STORAGE_API}`, - }, - body: JSON.stringify({ - name, - description, - image: `https://ipfs.io/ipfs/${imageCID}`, - }), - }); +async function uploadMetadataJson( + name: string, + description: string, + imageCID: string, +) { + const randomFileName = `metadata_${Date.now()}_${Math.floor(Math.random() * 10000)}.json`; + const data = JSON.stringify({ + pinataContent: { + name, + description, + imageCID, + }, + pinataMetadata: { + name: randomFileName, + }, + }); + const response = await fetch( + "https://api.pinata.cloud/pinning/pinJSONToIPFS", + { + method: "POST", + headers: { + "Content-Type": "application/x-www-form-urlencoded", + Accept: "application/json", + Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_PINATA_JWT}`, + }, + body: data, + }, + ); + const responseBody = await response.json(); - const data = await response.json(); - const cid = data.value.cid; + return responseBody; +} - return cid; - }; - ``` +const uploadImage = useCallback(async (fileUri: string): Promise => 
{ + const upload = await uploadImageFromURI(fileUri); + return upload.IpfsHash; +}, []); - Minting the NFT after the image and metadata have been uploaded is as simple - as calling `metaplex.nfts().create(...)`. Below shows the `createNFT` - function tying everything together: +const uploadMetadata = useCallback( + async ( + name: string, + description: string, + imageCID: string, + ): Promise => { + const uploadResponse = await uploadMetadataJson( + name, + description, + imageCID, + ); + return uploadResponse.IpfsHash; + }, + [], +); +``` - ```tsx - const createNFT = async ( - name: string, - description: string, - fileUri: string, - ) => { - if (!metaplex || !account || isLoading) return; +Minting the NFT after the image and metadata have been uploaded is as simple as +calling `createNft` from `@metaplex-foundation/mpl-token-metadata`. Below shows +the `createNFT` function tying everything together: - setIsLoading(true); - try { - const imageCID = await uploadImage(fileUri); - const metadataCID = await uploadMetadata(name, description, imageCID); - - const nft = await metaplex.nfts().create({ - uri: `https://ipfs.io/ipfs/${metadataCID}`, - name: name, - sellerFeeBasisPoints: 0, - }); - - setNftOfTheDay(nft.nft); - } catch (error) { - console.log(error); - } finally { - setIsLoading(false); - } - }; - ``` +```tsx +const createNFT = useCallback( + async (name: string, description: string, fileUri: string) => { + if (!umi || !account || isLoading) return; + setIsLoading(true); + try { + console.log(`Creating NFT...`); + const imageCID = await uploadImage(fileUri); + const metadataCID = await uploadMetadata(name, description, imageCID); + const mint = generateSigner(umi); + const transaction = createNft(umi, { + mint, + name, + uri: ipfsPrefix + metadataCID, + sellerFeeBasisPoints: percentAmount(0), + }); + await transaction.sendAndConfirm(umi); + const createdNft = await fetchDigitalAsset(umi, mint.publicKey); + setNftOfTheDay(createdNft); + } catch (error) { + 
console.error("Failed to create NFT:", error); + } finally { + setIsLoading(false); + } + }, + [umi, account, isLoading, uploadImage, uploadMetadata], +); +``` We'll put all of the above into the `NFTProvider.tsx` file. All together, this looks as follows: ```tsx import "react-native-url-polyfill/auto"; -import React, { ReactNode, createContext, useContext, useState } from "react"; import { - Metaplex, + DigitalAsset, + createNft, + fetchAllDigitalAssetByCreator, + fetchDigitalAsset, +} from "@metaplex-foundation/mpl-token-metadata"; +import { PublicKey, - Metadata, - Nft, - Sft, - SftWithToken, - NftWithToken, -} from "@metaplex-foundation/js"; -import { useConnection } from "./ConnectionProvider"; -import { Connection, clusterApiUrl } from "@solana/web3.js"; -import { transact } from "@solana-mobile/mobile-wallet-adapter-protocol"; -import { Account, useAuthorization } from "./AuthProvider"; -import RNFetchBlob from "rn-fetch-blob"; -import { useMetaplex } from "./MetaplexProvider"; + Umi, + generateSigner, + percentAmount, +} from "@metaplex-foundation/umi"; +import { fromWeb3JsPublicKey } from "@metaplex-foundation/umi-web3js-adapters"; +import { clusterApiUrl, PublicKey as solanaPublicKey } from "@solana/web3.js"; +import React, { + ReactNode, + createContext, + useCallback, + useContext, + useEffect, + useMemo, + useState, +} from "react"; +import { useUmi } from "./UmiProvider"; +import { useMobileWallet } from "../utils/useMobileWallet"; +import { Account, useAuthorization } from "./AuthorizationProvider"; +import { Platform } from "react-native"; export interface NFTProviderProps { children: ReactNode; } export interface NFTContextState { - metaplex: Metaplex | null; - publicKey: PublicKey | null; - isLoading: boolean; - loadedNFTs: (Nft | Sft | SftWithToken | NftWithToken)[] | null; - nftOfTheDay: (Nft | Sft | SftWithToken | NftWithToken) | null; - connect: () => void; - fetchNFTs: () => void; - createNFT: (name: string, description: string, fileUri: 
string) => void; + umi: Umi | null; // Holds the Umi object that we use to call `fetch` and `create` on. + publicKey: PublicKey | null; // The public key of the authorized wallet + isLoading: boolean; // Loading state + loadedNFTs: DigitalAsset[] | null; // Array of loaded NFTs that contain metadata + nftOfTheDay: DigitalAsset | null; // The NFT snapshot created on the current day + connect: () => void; // Connects (and authorizes) us to the Devnet-enabled wallet + fetchNFTs: () => void; // Fetches the NFTs using the `metaplex` object + createNFT: (name: string, description: string, fileUri: string) => void; // Creates the NFT } -const DEFAULT_NFT_CONTEXT_STATE: NFTContextState = { - metaplex: new Metaplex(new Connection(clusterApiUrl("devnet"))), - publicKey: null, - isLoading: false, - loadedNFTs: null, - nftOfTheDay: null, - connect: () => PublicKey.default, - fetchNFTs: () => {}, - createNFT: (name: string, description: string, fileUri: string) => {}, -}; - -const NFTContext = createContext(DEFAULT_NFT_CONTEXT_STATE); - export function formatDate(date: Date) { return `${date.getDate()}.${date.getMonth()}.${date.getFullYear()}`; } +const NFTContext = createContext(null); + export function NFTProvider(props: NFTProviderProps) { - const { children } = props; - const { connection } = useConnection(); - const { authorizeSession } = useAuthorization(); + const ipfsPrefix = `https://${process.env.EXPO_PUBLIC_NFT_PINATA_GATEWAY_URL}/ipfs/`; const [account, setAccount] = useState(null); + const [nftOfTheDay, setNftOfTheDay] = useState(null); + const [loadedNFTs, setLoadedNFTs] = useState(null); const [isLoading, setIsLoading] = useState(false); - const [nftOfTheDay, setNftOfTheDay] = useState< - (Nft | Sft | SftWithToken | NftWithToken) | null - >(null); - const [loadedNFTs, setLoadedNFTs] = useState< - (Nft | Sft | SftWithToken | NftWithToken)[] | null - >(null); - - const { metaplex } = useMetaplex(connection, account, authorizeSession); - + const umi = useUmi(); + 
const { children } = props; const connect = () => { if (isLoading) return; @@ -1149,118 +1205,151 @@ export function NFTProvider(props: NFTProviderProps) { setIsLoading(false); }); }; - - const fetchNFTs = async () => { - if (!metaplex || !account || isLoading) return; - - setIsLoading(true); - + async function uploadImageFromURI(fileUri: string) { try { - const nfts = await metaplex.nfts().findAllByCreator({ - creator: account.publicKey, + const form = new FormData(); + const randomFileName = `image_${Date.now()}_${Math.floor(Math.random() * 10000)}.jpg`; + + // In React Native, especially when working with form data and files, you may need to send files using an object that contains a URI (file path), especially on Android and iOS platforms. However, this structure may not be recognized by TypeScript's strict type checking + // @ts-ignore + form.append("file", { + uri: + Platform.OS === "android" ? fileUri : fileUri.replace("file://", ""), + type: "image/jpeg", // Adjust the type as necessary + name: randomFileName, // Adjust the name as necessary }); - const loadedNFTs = await Promise.all( - nfts.map(nft => { - return metaplex.nfts().load({ metadata: nft as Metadata }); - }), + const options = { + method: "POST", + headers: { + Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_PINATA_JWT}`, + "Content-Type": "multipart/form-data", + }, + body: form, + }; + + const response = await fetch( + "https://api.pinata.cloud/pinning/pinFileToIPFS", + options, ); - setLoadedNFTs(loadedNFTs); - - // Check if we already took a snapshot today - const nftOfTheDayIndex = loadedNFTs.findIndex(nft => { - return formatDate(new Date(Date.now())) === nft.name; - }); + const responseJson = await response.json(); + console.log(responseJson.IpfsHash); - if (nftOfTheDayIndex !== -1) { - setNftOfTheDay(loadedNFTs[nftOfTheDayIndex]); - } + return responseJson; } catch (error) { - console.log(error); + console.error("Upload failed:", error); } finally { - setIsLoading(false); + 
console.log("Upload process completed."); } - }; - - // https://nft.storage/api-docs/ - const uploadImage = async (fileUri: string): Promise => { - const imageBytesInBase64: string = await RNFetchBlob.fs.readFile( - fileUri, - "base64", - ); - const bytes = Buffer.from(imageBytesInBase64, "base64"); - - const response = await fetch("https://api.nft.storage/upload", { - method: "POST", - headers: { - Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_STORAGE_API}`, - "Content-Type": "image/jpg", - }, - body: bytes, - }); - - const data = await response.json(); - const cid = data.value.cid; - - return cid as string; - }; + } - const uploadMetadata = async ( - name: string, - description: string, - imageCID: string, - ): Promise => { - const response = await fetch("https://api.nft.storage/upload", { - method: "POST", - headers: { - Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_STORAGE_API}`, - }, - body: JSON.stringify({ + async function uploadMetadataJson( + name = "Solanify", + description = "A truly sweet NFT of your day.", + imageCID = "bafkreih5aznjvttude6c3wbvqeebb6rlx5wkbzyppv7garjiubll2ceym4", + ) { + const randomFileName = `metadata_${Date.now()}_${Math.floor(Math.random() * 10000)}.json`; + const data = JSON.stringify({ + pinataContent: { name, description, - image: `https://ipfs.io/ipfs/${imageCID}`, - }), + imageCID, + }, + pinataMetadata: { + name: randomFileName, + }, }); + const response = await fetch( + "https://api.pinata.cloud/pinning/pinJSONToIPFS", + { + method: "POST", + headers: { + "Content-Type": "application/x-www-form-urlencoded", + Accept: "application/json", + Authorization: `Bearer ${process.env.EXPO_PUBLIC_NFT_PINATA_JWT}`, + }, + body: data, + }, + ); + const responseBody = await response.json(); - const data = await response.json(); - const cid = data.value.cid; - - return cid; - }; - - const createNFT = async ( - name: string, - description: string, - fileUri: string, - ) => { - if (!metaplex || !account || isLoading) return; 
+ return responseBody; + } + const fetchNFTs = useCallback(async () => { + if (!umi || !account || isLoading) return; setIsLoading(true); try { - const imageCID = await uploadImage(fileUri); - const metadataCID = await uploadMetadata(name, description, imageCID); - - const nft = await metaplex.nfts().create({ - uri: `https://ipfs.io/ipfs/${metadataCID}`, - name: name, - sellerFeeBasisPoints: 0, - }); - - setNftOfTheDay(nft.nft); + const creatorPublicKey = fromWeb3JsPublicKey(account.publicKey); + const nfts = await fetchAllDigitalAssetByCreator(umi, creatorPublicKey); + setLoadedNFTs(nfts); } catch (error) { - console.log(error); + console.error("Failed to fetch NFTs:", error); } finally { setIsLoading(false); } - }; + }, [umi, account, isLoading]); + + const uploadImage = useCallback(async (fileUri: string): Promise => { + const upload = await uploadImageFromURI(fileUri); + return upload.IpfsHash; + }, []); + + const uploadMetadata = useCallback( + async ( + name: string, + description: string, + imageCID: string, + ): Promise => { + const uploadResponse = await uploadMetadataJson( + name, + description, + imageCID, + ); + return uploadResponse.IpfsHash; + }, + [], + ); - const publicKey = account?.publicKey ?? 
null; + const createNFT = useCallback( + async (name: string, description: string, fileUri: string) => { + if (!umi || !account || isLoading) return; + setIsLoading(true); + try { + console.log(`Creating NFT...`); + const imageCID = await uploadImage(fileUri); + const metadataCID = await uploadMetadata(name, description, imageCID); + const mint = generateSigner(umi); + const transaction = createNft(umi, { + mint, + name, + uri: ipfsPrefix + metadataCID, + sellerFeeBasisPoints: percentAmount(0), + }); + await transaction.sendAndConfirm(umi); + const createdNft = await fetchDigitalAsset(umi, mint.publicKey); + setNftOfTheDay(createdNft); + } catch (error) { + console.error("Failed to create NFT:", error); + } finally { + setIsLoading(false); + } + }, + [umi, account, isLoading, uploadImage, uploadMetadata], + ); - const state = { + const publicKey = useMemo( + () => + account?.publicKey + ? fromWeb3JsPublicKey(account.publicKey as solanaPublicKey) + : null, + [account], + ); + + const state: NFTContextState = { isLoading, - account, publicKey, - metaplex, + umi, nftOfTheDay, loadedNFTs, connect, @@ -1271,7 +1360,13 @@ export function NFTProvider(props: NFTProviderProps) { return {children}; } -export const useNFT = (): NFTContextState => useContext(NFTContext); +export const useNFT = (): NFTContextState => { + const context = useContext(NFTContext); + if (!context) { + throw new Error("useNFT must be used within an NFTProvider"); + } + return context; +}; ``` #### 2. 
Main Screen @@ -1401,50 +1496,90 @@ export function MainScreen() { const [previousImages, setPreviousImages] = React.useState(DEFAULT_IMAGES); const todaysDate = new Date(Date.now()); + const ipfsPrefix = `https://${process.env.EXPO_PUBLIC_NFT_PINATA_GATEWAY_URL}/ipfs/`; + type NftMetaResponse = { + name: string; + description: string; + imageCID: string; + }; + const fetchMetadata = async (uri: string) => { + try { + const response = await fetch(uri); + const metadata = await response.json(); + return metadata as NftMetaResponse; + } catch (error) { + console.error("Error fetching metadata:", error); + return null; + } + }; useEffect(() => { if (!loadedNFTs) return; - const loadedSnapshots = loadedNFTs.map(loadedNft => { - if (!loadedNft.json) return null; - if (!loadedNft.json.name) return null; - if (!loadedNft.json.description) return null; - if (!loadedNft.json.image) return null; + const loadSnapshots = async () => { + const loadedSnapshots = await Promise.all( + loadedNFTs.map(async loadedNft => { + if (!loadedNft.metadata.name) return null; + if (!loadedNft.metadata.uri) return null; - const uri = loadedNft.json.image; - const unixTime = Number(loadedNft.json.description); + const metadata = await fetchMetadata(loadedNft.metadata.uri); + if (!metadata) return null; - if (!uri) return null; - if (isNaN(unixTime)) return null; + const { imageCID, description } = metadata; + if (!imageCID || !description) return null; - return { - uri: loadedNft.json.image, - date: new Date(unixTime), - } as NFTSnapshot; - }); + const unixTime = Number(description); + if (isNaN(unixTime)) return null; - // Filter out null values - const cleanedSnapshots = loadedSnapshots.filter(loadedSnapshot => { - return loadedSnapshot !== null; - }) as NFTSnapshot[]; + return { + uri: ipfsPrefix + imageCID, + date: new Date(unixTime), + } as NFTSnapshot; + }), + ); - // Sort by date - cleanedSnapshots.sort((a, b) => { - return b.date.getTime() - a.date.getTime(); - }); + // Filter out null 
values + const cleanedSnapshots = loadedSnapshots.filter( + (snapshot): snapshot is NFTSnapshot => snapshot !== null, + ); - setPreviousImages(cleanedSnapshots as NFTSnapshot[]); + // Sort by date + cleanedSnapshots.sort((a, b) => b.date.getTime() - a.date.getTime()); + + setPreviousImages(cleanedSnapshots); + }; + + loadSnapshots(); }, [loadedNFTs]); useEffect(() => { if (!nftOfTheDay) return; - setCurrentImage({ - uri: nftOfTheDay.json?.image ?? "", - date: todaysDate, - }); - }, [nftOfTheDay]); + const fetchNftOfTheDayMetadata = async () => { + try { + if (!nftOfTheDay.metadata.uri) { + console.error("No metadata URI found for nftOfTheDay"); + return; + } + + const response = await fetchMetadata(nftOfTheDay.metadata.uri); + + if (!response?.imageCID) { + console.error("No image found in nftOfTheDay metadata"); + return; + } + + setCurrentImage({ + uri: ipfsPrefix + response.imageCID, + date: todaysDate, + }); + } catch (error) { + console.error("Error fetching nftOfTheDay metadata:", error); + } + }; + fetchNftOfTheDayMetadata(); + }, [nftOfTheDay, todaysDate]); const mintNFT = async () => { const result = await ImagePicker.launchCameraAsync({ mediaTypes: ImagePicker.MediaTypeOptions.Images, @@ -1533,7 +1668,7 @@ approve the app. Fetch all of the NFTs by tapping `Fetch NFTs`. Lastly, tap Congratulations! That was not an easy or quick lab. You're doing great if you've made it this far. If you run into any issues, please go back through the lab and/or reference the final solution code on the -[`main` branch in Github](https://github.com/Unboxed-Software/solana-advance-mobile). +[`main` branch in Github](https://github.com/solana-developers/mobile-apps-with-expo). 
## Challenge diff --git a/content/courses/native-onchain-development/cross-program-invocations.md b/content/courses/native-onchain-development/cross-program-invocations.md index dff3df609..693c95bcd 100644 --- a/content/courses/native-onchain-development/cross-program-invocations.md +++ b/content/courses/native-onchain-development/cross-program-invocations.md @@ -5,74 +5,84 @@ objectives: - Describe how to construct and use CPIs - Explain how a program provides a signature for a PDA - Avoid common pitfalls and troubleshoot common errors associated with CPIs -description: "How to invoke functions in other Solana programs." +description: "Learn how to invoke functions in other Solana programs." --- ## Summary -- A **Cross-Program Invocation (CPI)** is a call from one program to another, - targeting a specific instruction on the program called -- CPIs are made using the commands `invoke` or `invoke_signed`, the latter being - how programs provide signatures for PDAs that they own -- CPIs make programs in the Solana ecosystem completely interoperable because - all public instructions of a program can be invoked by another program via a - CPI -- Because we have no control over the accounts and data submitted to a program, - it's important to verify all of the parameters passed into a CPI to ensure - program security +- A **Cross-Program Invocation (CPI)** is when one program calls another, + targeting a specific instruction in the called program. +- CPIs are performed using the commands `invoke` or `invoke_signed`, with the + latter enabling programs to sign on behalf of Program Derived Addresses (PDAs) + they own. +- CPIs enable Solana programs to be fully interoperable, allowing any + instruction handler to be invoked by another program via a CPI. +- CPIs are commonly used. For example, if your program transfers tokens, it will + perform a CPI to the Token or Token Extensions programs to execute the + transfer. 
+- Since the calling program in a CPI does not have control over the accounts or + data passed to the invoked program, it's crucial for the invoked program to + verify all parameters. This ensures that malicious or incorrect data doesn't + compromise program security. ## Lesson ### What is a CPI? -A Cross-Program Invocation (CPI) is a direct call from one program into another. -Just as any client can call any program using the JSON RPC, any program can call -any other program directly. The only requirement for invoking an instruction on -another program from within your program is that you construct the instruction -correctly. You can make CPIs to native programs, other programs you've created, -and third party programs. CPIs essentially turn the entire Solana ecosystem into -one giant API that is at your disposal as a developer. +A **Cross-Program Invocation (CPI)** is when one program directly calls another +program's instruction, similar to how a client makes calls to programs using the +JSON RPC API. In a CPI, your program can call native programs, third-party +programs, or programs you've created. CPIs allow for seamless interaction +between programs, effectively making the entire Solana ecosystem one large API +for developers. -CPIs have a similar make up to instructions that you are used to creating client -side. There are some intricacies and differences depending on if you are using -`invoke` or `invoke_signed`. We'll be covering both of these later in this -lesson. +To invoke an instruction on another program, you need to construct the +instruction correctly. The process of creating a CPI is similar to creating +instructions on the client side, but there are important distinctions when using +`invoke` or `invoke_signed`. We'll dive into both methods later in this lesson. 
-### How to make a CPI +### Making a Cross-Program Invocation (CPI) -CPIs are made using the -[`invoke`](https://docs.rs/solana-program/1.10.19/solana_program/program/fn.invoke.html) +To make a CPI, use either the +[`invoke`](https://docs.rs/solana-program/latest/solana_program/program/fn.invoke.html) or -[`invoke_signed`](https://docs.rs/solana-program/1.10.19/solana_program/program/fn.invoke_signed.html) -function from the `solana_program` crate. You use `invoke` to essentially pass -through the original transaction signature that was passed into your program. -You use `invoke_signed` to have your program "sign" for its PDAs. +[`invoke_signed`](https://docs.rs/solana-program/latest/solana_program/program/fn.invoke_signed.html) +functions from the `solana_program` crate. + +- Use `invoke` to pass through the original transaction signature that was + submitted to your program. +- Use `invoke_signed` when your program needs to "sign" for its Program Derived + Addresses (PDAs). ```rust -// Used when there are not signatures for PDAs needed +// Used when no signatures are required for PDAs pub fn invoke( instruction: &Instruction, account_infos: &[AccountInfo<'_>] ) -> ProgramResult -// Used when a program must provide a 'signature' for a PDA, hence the signer_seeds parameter +// Used when a program must provide a 'signature' for a PDA, utilizing the signer_seeds parameter pub fn invoke_signed( instruction: &Instruction, account_infos: &[AccountInfo<'_>], + // An array of signing PDAs, each with an array of seeds, which are an array of `u8` bytes. signers_seeds: &[&[&[u8]]] ) -> ProgramResult ``` -CPIs extend the privileges of the caller to the callee. If the instruction the -callee program is processing contains an account that was marked as a signer or -writable when originally passed into the caller program, then it will be -considered a signer or writable account in the invoked program as well. 
+When you make a Cross-Program Invocation (CPI), the privileges of the invoking +program are extended to the invoked program. If the invoking program's +instruction handler had accounts marked as a signer or writable when calling the +invoked program, those accounts retain their signer or writable status in the +invoked program. + + -It's important to note that you as the developer decide which accounts to pass -into the CPI. You can think of a CPI as building another instruction from -scratch with only information that was passed into your program. +As the developer, you have full control over which accounts are passed into the +CPI. You can think of constructing a CPI as building a new instruction from +scratch, but only with the data that was passed into your program. -#### CPI with `invoke` +#### CPI with invoke function ```rust invoke( @@ -85,13 +95,13 @@ invoke( )?; ``` -- `program_id` - the public key of the program you are going to invoke -- `account` - a list of account metadata as a vector. You need to include every - account that the invoked program will read or write -- `data` - a byte buffer representing the data being passed to the callee - program as a vector +- `program_id` - The public key of the program you're invoking. +- `account` - A list of account metadata as a vector. Include every account the + invoked program will read or write. +- `data` - A byte buffer representing the data passed to the invoked program as + a vector. -The `Instruction` type has the following definition: +The `Instruction` struct has the following definition: ```rust pub struct Instruction { @@ -101,23 +111,21 @@ pub struct Instruction { } ``` -Depending on the program you're making the call to, there may be a crate -available with helper functions for creating the `Instruction` object. Many -individuals and organizations create publicly available crates alongside their -programs that expose these sorts of functions to simplify calling their -programs. 
This is similar to the Typescript libraries we've used in this course -(e.g. [@solana/web3.js](https://solana-labs.github.io/solana-web3.js/), -[@solana/spl-token](https://solana-labs.github.io/solana-program-library/token/js/)). -For example, in this lesson's lab we'll be using the `spl_token` crate to create -minting instructions. In all other cases, you'll need to create the -`Instruction` instance from scratch. +Depending on the program you're calling, there may be a crate available with +helper functions for creating the `Instruction` object. Many individuals and +organizations provide publicly available crates alongside their programs that +expose these functions, simplifying program interaction. + +For example, in this lesson's lab, we'll be using the `spl_token` crate to +create minting instructions. In cases where no such crate is available, you'll +need to manually create the `Instruction` instance. -While the `program_id` field is fairly straightforward, the `accounts` and -`data` fields require some explanation. +While the `program_id` field is straightforward, the `accounts` and `data` +fields require further explanation. -Both the `accounts` and `data` fields are of type `Vec`, or vector. You can use +Both the `accounts` and `data` fields are of type `Vec` (vector). You can use the [`vec`](https://doc.rust-lang.org/std/macro.vec.html) macro to construct a -vector using array notation, like so: +vector using array notation, as shown below: ```rust let v = vec![1, 2, 3]; @@ -151,14 +159,16 @@ vec![ ] ``` -The final field of the instruction object is the data, as a byte buffer of -course. You can create a byte buffer in Rust using the `vec` macro again, which -has an implemented function allowing you to create a vector of certain length. -Once you have initialized an empty vector, you would construct the byte buffer -similar to how you would client-side. 
Determine the data required by the callee -program and the serialization format used and write your code to match. Feel -free to read up on some of the -[features of the `vec` macro available to you here](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#). +The final field of the `Instruction` object is the data, represented as a byte +buffer. In Rust, you can create this buffer by using +[`Vec::with_capacity()`](https://doc.rust-lang.org/std/vec/struct.Vec.html#method.with_capacity) +to allocate space, and then populate the vector by pushing values or extending +it with slices. This allows you to construct the byte buffer incrementally, +similar to how you would on the client side. + +Determine the data required by the invoked program and the serialization format +used, then write your code to match. Feel free to read up on some of the +[features of the `vec` macro](https://doc.rust-lang.org/alloc/vec/struct.Vec.html#). ```rust let mut vec = Vec::with_capacity(3); @@ -177,19 +187,18 @@ input, iterates over the slice, clones each element, and then appends it to the In addition to the instruction, both `invoke` and `invoke_signed` also require a list of `account_info` objects. Just like the list of `AccountMeta` objects you -added to the instruction, you must include all of the accounts that the program -you're calling will read or write. +added to the instruction, you must include all the accounts that the program +you're invoking will read or write. By the time you make a CPI in your program, you should have already grabbed all the `account_info` objects that were passed into your program and stored them in variables. You'll construct your list of `account_info` objects for the CPI by -choosing which of these accounts to copy and send along. +selecting which of these accounts to copy and send along. 
-You can copy each `account_info` object that you need to pass into the CPI using -the +You can copy each `account_info` object you need to pass into the CPI using the [`Clone`](https://docs.rs/solana-program/1.10.19/solana_program/account_info/struct.AccountInfo.html#impl-Clone) -trait that is implemented on the `account_info` struct in the `solana_program` -crate. This `Clone` trait returns a copy of the +trait implemented on the `account_info` struct in the `solana_program` crate. +This `Clone` trait returns a copy of the [`account_info`](https://docs.rs/solana-program/1.10.19/solana_program/account_info/struct.AccountInfo.html) instance. @@ -197,7 +206,7 @@ instance. &[first_account.clone(), second_account.clone(), third_account.clone()] ``` -#### CPI with `invoke` +#### CPI with invoke With both the instruction and the list of accounts created, you can perform a call to `invoke`. @@ -214,21 +223,22 @@ invoke( ``` There's no need to include a signature because the Solana runtime passes along -the original signature passed into your program. Remember, `invoke` won't work -if a signature is required on behalf of a PDA. For that, you'll need to use +the original signature provided to your program. Remember, `invoke` won't work +if a signature is required on behalf of a PDA. In that case, you'll need to use `invoke_signed`. -#### CPI with `invoke_signed` +#### CPI with invoke_signed -Using `invoke_signed` is a little different just because there is an additional -field that requires the seeds used to derive any PDAs that must sign the -transaction. You may recall from previous lessons that PDAs do not lie on the -Ed25519 curve and, therefore, do not have a corresponding secret key. You've -been told that programs can provide signatures for their PDAs, but have not -learned how that actually happens - until now. Programs provide signatures for -their PDAs with the `invoke_signed` function. 
The first two fields of -`invoke_signed` are the same as `invoke`, but there is an additional -`signers_seeds` field that comes into play here. +Using `invoke_signed` is slightly different because there is an additional field +that requires the seeds used to derive any PDAs that must sign the transaction. +You may recall from previous lessons that PDAs do not lie on the Ed25519 curve +and, therefore, do not have a corresponding secret key. You've learned that +programs can provide signatures for their PDAs, but haven't yet learned how this +works—until now. Programs provide signatures for their PDAs with the +`invoke_signed` function. + +The first two fields of `invoke_signed` are the same as `invoke`, but an +additional `signers_seeds` field is required here. ```rust invoke_signed( @@ -246,38 +256,37 @@ runtime to verify that the PDA belongs to the calling program is for the calling program to supply the seeds used to generate the address in the `signers_seeds` field. -The Solana runtime will internally -call [`create_program_address`](https://docs.rs/solana-program/1.4.4/solana_program/pubkey/struct.Pubkey.html#method.create_program_address) -using the seeds provided and the `program_id` of the calling program. It can +The Solana runtime will internally call +[`create_program_address`](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.create_program_address) +using the seeds provided and the `program_id` of the calling program. It will then compare the result against the addresses supplied in the instruction. If -any of the addresses match, then the runtime knows that indeed the program -associated with this address is the caller and thus is authorized to be a -signer. +any of the addresses match, the runtime knows that the program associated with +the address is the invoking program and is authorized to be a signer. 
-### Best Practices and common pitfalls +### Best practices and common pitfalls #### Security checks -There are some common mistakes and things to remember when utilizing CPIs that -are important to your program's security and robustness. The first thing to -remember is that, as we know by now, we have no control over what information is -passed into our programs. For this reason, it's important to always verify the -`program_id`, accounts, and data passed into the CPI. Without these security -checks, someone could submit a transaction that invokes an instruction on a -completely different program than was expected, which is not ideal. - -Fortunately, there are inherent checks on the validity of any PDAs that are -marked as signers within the `invoke_signed` function. All other accounts and -`instruction_data` should be verified somewhere in your program code before -making the CPI. It's also important to make sure you're targeting the intended -instruction on the program you are invoking. The easiest way to do this is to -read the source code of the program you will be invoking just as you would if -you were constructing an instruction from the client side. +There are some common mistakes and important things to remember when utilizing +CPIs to ensure your program's security and robustness. First, keep in mind that +we have no control over the information passed into our programs. Therefore, +it's crucial to always verify the `program_id`, accounts, and data passed into +the CPI. Without these security checks, someone could submit a transaction that +invokes an instruction on a completely different program than expected, which is +a significant security risk. + +Fortunately, the `invoke_signed` function performs inherent checks on the +validity of any PDAs marked as signers. However, all other accounts and +`instruction_data` should be verified in your program code before making the +CPI. 
It's also important to ensure that you're targeting the intended +instruction in the program you're invoking. The simplest way to do this is to +review the source code of the program you're invoking, just as you would when +constructing an instruction from the client side. #### Common errors -There are some common errors you might receive when executing a CPI, they -usually mean you are constructing the CPI with incorrect information. For +There are common errors you might encounter when executing a CPI, which usually +indicate that you're constructing the CPI with incorrect information. For example, you may come across an error message similar to this: ```text @@ -285,104 +294,108 @@ EF1M4SPfKcchb6scq297y8FPCaLvj5kGjwMzjTM68wjA's signer privilege escalated Program returned error: "Cross-program invocation with unauthorized signer or writable account" ``` -This message is a little misleading, because “signer privilege escalated” does -not seem like a problem but, in reality, it means that you are incorrectly -signing for the address in the message. If you are using `invoke_signed` and -receive this error, then it likely means that the seeds you are providing are -incorrect. You can also find +This message can be misleading because "signer privilege escalated" might not +initially seem like an issue, but it actually means you are incorrectly signing +for the address in the message. If you're using `invoke_signed` and receive this +error, it's likely that the seeds you're providing are incorrect. You can check [an example transaction that failed with this error](https://explorer.solana.com/tx/3mxbShkerH9ZV1rMmvDfaAhLhJJqrmMjcsWzanjkARjBQurhf4dounrDCUkGunH1p9M4jEwef9parueyHVw6r2Et?cluster=devnet). -Another similar error is thrown when an account that's written to isn't marked -as `writable` inside the `AccountMeta` struct. +Another similar error occurs when an account that's written to isn't marked as +`writable` in the `AccountMeta` struct. 
```text 2qoeXa9fo8xVHzd2h9mVcueh6oK3zmAiJxCTySM5rbLZ's writable privilege escalated Program returned error: "Cross-program invocation with unauthorized signer or writable account" ``` + + Remember, any account whose data may be mutated by the program during execution -must be specified as writable. During execution, writing to an account that was -not specified as writable will cause the transaction to fail. Writing to an -account that is not owned by the program will cause the transaction to fail. Any -account whose lamport balance may be mutated by the program during execution -must be specified as writable. During execution, mutating the lamports of an -account that was not specified as writable will cause the transaction to fail. -While subtracting lamports from an account not owned by the program will cause -the transaction to fail, adding lamports to any account is allowed, as long is -it is mutable. +must be specified as `writable`. During execution, attempting to write to an +account that was not marked as `writable` will cause the transaction to fail. +Similarly, writing to an account not owned by the program will also cause the +transaction to fail. + +Any account whose lamport balance may be mutated by the program during execution +must also be specified as `writable`. Mutating the lamports of an account that +was not marked as `writable` will cause the transaction to fail. While +subtracting lamports from an account not owned by the program will cause the +transaction to fail, adding lamports to any account is allowed, as long as it is +mutable. To see this in action, view this [transaction in the explorer](https://explorer.solana.com/tx/ExB9YQJiSzTZDBqx4itPaa4TpT8VK4Adk7GU5pSoGEzNz9fa7PPZsUxssHGrBbJRnCvhoKgLCWnAycFB7VYDbBg?cluster=devnet). + ### Why CPIs matter? -CPIs are a very important feature of the Solana ecosystem and they make all -programs deployed interoperable with each other. 
With CPIs there is no need to -re-invent the wheel when it comes to development. This creates the opportunity -for building new protocols and applications on top of what's already been built, -just like building blocks or Lego bricks. It's important to remember that CPIs -are a two-way street and the same is true for any programs that you deploy! If -you build something cool and useful, developers have the ability to build on top -of what you've done or just plug your protocol into whatever it is that they are -building. Composability is a big part of what makes crypto so unique and CPIs -are what makes this possible on Solana. +Cross-Program Invocations (CPIs) are a crucial feature of the Solana ecosystem +because they make all deployed programs interoperable. With CPIs, there's no +need to reinvent the wheel during development, as they enable new protocols and +applications to be built on top of existing ones, much like building blocks or +Lego bricks. CPIs make composability possible, allowing developers to integrate +or build on top of your programs. If you build something cool and useful, other +developers can leverage your protocol in their projects. Composability is one of +the unique aspects of Web3, and CPIs enable this on Solana. Another important aspect of CPIs is that they allow programs to sign for their -PDAs. As you have probably noticed by now, PDAs are used very frequently in -Solana development because they allow programs to control specific addresses in -such a way that no external user can generate transactions with valid signatures -for those addresses. This can be _very_ useful for many applications in Web3 -(e.g. DeFi, NFTs, etc.) Without CPIs, PDAs would not be nearly as useful because -there would be no way for a program to sign transactions involving them - -essentially turning them black holes (once something is sent to a PDA, there -would be no way to get it back out w/o CPIs!) +PDAs. 
As you've likely noticed, PDAs are frequently used in Solana development +because they allow programs to control specific addresses in a way that prevents +external users from generating valid transactions with signatures for those +addresses. This feature is _extremely_ useful in many Web3 applications, such as +DeFi and NFTs. Without CPIs, PDAs would be far less useful since programs +wouldn't be able to sign transactions involving them—effectively turning them +into black holes where assets sent to a PDA couldn't be retrieved without CPIs! ## Lab -Now let's get some hands on experience with CPIs by making some additions to the -Movie Review program again. If you're dropping into this lesson without having -gone through prior lessons, the Movie Review program allows users to submit -movie reviews and have them stored in PDA accounts. +Now let's get some hands-on experience with CPIs by making some additions to the +Movie Review program. If you're dropping into this lesson without going through +the prior ones, the Movie Review program allows users to submit movie reviews, +which are stored in PDA accounts. -Last lesson, we added the ability to leave comments on other movie reviews using -PDAs. In this lesson, we're going to work on having the program mint tokens to -the reviewer or commenter anytime a review or comment is submitted. +In the +[program derived addresses lesson](/content/courses/native-onchain-development/program-derived-addresses.md), +we added the ability to leave comments on movie reviews using PDAs. In this +lesson, we'll work on having the program mint tokens to reviewers or commenters +whenever a review or comment is submitted. -To implement this, we'll have to invoke the SPL Token Program's `MintTo` -instruction using a CPI. If you need a refresher on tokens, token mints, and -minting new tokens, have a look at the -[Token Program lesson](/content/courses/tokens/token-program) before moving -forward with this lab. 
+To implement this, we'll invoke the SPL Token Program's `MintTo` instruction +using a CPI. If you need a refresher on tokens, token mints, and minting new +tokens, check out the +[Token Program lesson](/content/courses/tokens-and-nfts/token-program.md) before +moving forward with this lab. -#### 1. Get starter code and add dependencies +### 1. Get starter code and add dependencies -To get started, we will be using the final state of the Movie Review program -from the previous PDA lesson. So, if you just completed that lesson then you're -all set and ready to go. If you are just jumping in here, no worries, you can -[download the starter code here](https://github.com/Unboxed-Software/solana-movie-program/tree/solution-add-comments). -We'll be using the `solution-add-comments` branch as our starting point. +To get started, we'll be using the final state of the Movie Review program from +the +[previous PDA lesson](/content/courses/native-onchain-development/program-derived-addresses.md). +If you just completed that lesson, you're all set and ready to go. If you're +jumping in at this point, no worries! You can download the +[starter code from the `solution-add-comments` branch](https://github.com/solana-developers/movie-program/tree/solution-add-comments). -#### 2. Add dependencies to `Cargo.toml` +### 2. Add dependencies to Cargo.toml Before we get started we need to add two new dependencies to the `Cargo.toml` file underneath `[dependencies]`. We'll be using the `spl-token` and `spl-associated-token-account` crates in addition to the existing dependencies. 
-```text -spl-token = { version="~3.2.0", features = [ "no-entrypoint" ] } -spl-associated-token-account = { version="=1.0.5", features = [ "no-entrypoint" ] } +```toml +spl-token = { version="6.0.0", features = [ "no-entrypoint" ] } +spl-associated-token-account = { version="5.0.1", features = [ "no-entrypoint" ] } ``` After adding the above, run `cargo check` in your console to have cargo resolve your dependencies and ensure that you are ready to continue. Depending on your setup you may need to modify crate versions before moving on. -#### 3. Add necessary accounts to `add_movie_review` +### 3. Add necessary accounts to add_movie_review Because we want users to be minted tokens upon creating a review, it makes sense to add minting logic inside the `add_movie_review` function. Since we'll be -minting tokens, the `add_movie_review` instruction requires a few new accounts -to be passed in: +minting tokens, the `add_movie_review` instruction handler requires a few new +accounts to be passed in: - `token_mint` - the mint address of the token - `mint_auth` - address of the authority of the token mint @@ -393,7 +406,7 @@ to be passed in: We'll start by adding these new accounts to the area of the function that iterates through the passed in accounts: -```rust +```rust filename="processor.rs" // Inside add_movie_review msg!("Adding movie review..."); msg!("Title: {}", title); @@ -404,7 +417,6 @@ let account_info_iter = &mut accounts.iter(); let initializer = next_account_info(account_info_iter)?; let pda_account = next_account_info(account_info_iter)?; -let pda_counter = next_account_info(account_info_iter)?; let token_mint = next_account_info(account_info_iter)?; let mint_auth = next_account_info(account_info_iter)?; let user_ata = next_account_info(account_info_iter)?; @@ -416,12 +428,12 @@ There is no additional `instruction_data` required for the new functionality, so no changes need to be made to how data is deserialized. 
The only additional information that's needed is the extra accounts. -#### 4. Mint tokens to the reviewer in `add_movie_review` +### 4. Mint tokens to the reviewer in add_movie_review Before we dive into the minting logic, let's import the address of the Token program and the constant `LAMPORTS_PER_SOL` at the top of the file. -```rust +```rust filename="processor.rs" // Inside processor.rs use solana_program::native_token::LAMPORTS_PER_SOL; use spl_associated_token_account::get_associated_token_address; @@ -445,9 +457,9 @@ Let's go ahead and derive the token mint and mint authority addresses using the `find_program_address` function with the seeds “token_mint” and "token_auth," respectively. -```rust -// Mint tokens here -msg!("deriving mint authority"); +```rust filename="processor.rs" +// Mint tokens for adding a review +msg!("Deriving mint authority"); let (mint_pda, _mint_bump) = Pubkey::find_program_address(&[b"token_mint"], program_id); let (mint_auth_pda, mint_auth_bump) = Pubkey::find_program_address(&[b"token_auth"], program_id); @@ -456,7 +468,7 @@ let (mint_auth_pda, mint_auth_bump) = Next, we'll perform security checks against each of the new accounts passed into the program. Always remember to verify accounts! -```rust +```rust filename="processor.rs" if *token_mint.key != mint_pda { msg!("Incorrect token mint"); return Err(ReviewError::IncorrectAccountError.into()); @@ -468,7 +480,7 @@ if *mint_auth.key != mint_auth_pda { } if *user_ata.key != get_associated_token_address(initializer.key, token_mint.key) { - msg!("Incorrect token mint"); + msg!("Incorrect associated token account for initializer"); return Err(ReviewError::IncorrectAccountError.into()); } @@ -485,7 +497,7 @@ because it means we don't have to manually build the entire instruction from scratch. Rather, we can simply pass in the arguments required by the function. 
Here's the function signature: -```rust +```rust filename="processor.rs" // Inside the token program, returns an Instruction object pub fn mint_to( token_program_id: &Pubkey, @@ -501,7 +513,7 @@ Then we provide copies of the `token_mint`, `user_ata`, and `mint_auth` accounts. And, most relevant to this lesson, we provide the seeds used to find the `token_mint` address, including the bump seed. -```rust +```rust filename="processor.rs" msg!("Minting 10 tokens to User associated token account"); invoke_signed( // Instruction @@ -511,7 +523,7 @@ invoke_signed( user_ata.key, mint_auth.key, &[], - 10*LAMPORTS_PER_SOL, + 10 * LAMPORTS_PER_SOL, )?, // Account_infos &[token_mint.clone(), user_ata.clone(), mint_auth.clone()], @@ -532,18 +544,18 @@ If any of the addresses match the derived address, the runtime knows that the matching account is a PDA of this program and that the program is signing this transaction for this account. -At this point, the `add_movie_review` instruction should be fully functional and -will mint ten tokens to the reviewer when a review is created. +At this point, the `add_movie_review` instruction handler should be fully +functional and will mint ten tokens to the reviewer when a review is created. -#### 5. Repeat for `add_comment` +### 5. Repeat for add_comment Our updates to the `add_comment` function will be almost identical to what we did for the `add_movie_review` function above. The only difference is that we'll -change the amount of tokens minted for a comment from ten to five so that adding -reviews are weighted above commenting. First, update the accounts with the same +change the number of tokens minted for a comment from ten to five so that adding +reviews is weighted above commenting. First, update the accounts with the same four additional accounts as in the `add_movie_review` function. 
-```rust +```rust filename="processor.rs" // Inside add_comment let account_info_iter = &mut accounts.iter(); @@ -562,9 +574,9 @@ Next, move to the bottom of the `add_comment` function just before the `Ok(())`. Then derive the token mint and mint authority accounts. Remember, both are PDAs derived from seeds "token_mint" and "token_authority" respectively. -```rust +```rust filename="processor.rs" // Mint tokens here -msg!("deriving mint authority"); +msg!("Deriving mint authority"); let (mint_pda, _mint_bump) = Pubkey::find_program_address(&[b"token_mint"], program_id); let (mint_auth_pda, mint_auth_bump) = Pubkey::find_program_address(&[b"token_auth"], program_id); @@ -572,7 +584,7 @@ let (mint_auth_pda, mint_auth_bump) = Next, verify that each of the new accounts is the correct account. -```rust +```rust filename="processor.rs" if *token_mint.key != mint_pda { msg!("Incorrect token mint"); return Err(ReviewError::IncorrectAccountError.into()); @@ -584,7 +596,7 @@ if *mint_auth.key != mint_auth_pda { } if *user_ata.key != get_associated_token_address(commenter.key, token_mint.key) { - msg!("Incorrect token mint"); + msg!("Incorrect associated token account for commenter"); return Err(ReviewError::IncorrectAccountError.into()); } @@ -597,7 +609,7 @@ if *token_program.key != TOKEN_PROGRAM_ID { Finally, use `invoke_signed` to send the `mint_to` instruction to the Token program, sending five tokens to the commenter. -```rust +```rust filename="processor.rs" msg!("Minting 5 tokens to User associated token account"); invoke_signed( // Instruction @@ -618,7 +630,7 @@ invoke_signed( Ok(()) ``` -#### 6. Set up the token mint +### 6. Set up the token mint We've written all the code needed to mint tokens to reviewers and commenters, but all of it assumes that there is a token mint at the PDA derived with the @@ -631,7 +643,7 @@ concepts associated with PDAs and CPIs multiple times, we're going to walk through this bit with less explanation than the prior steps. 
Start by adding a fourth instruction variant to the `MovieInstruction` enum in `instruction.rs`. -```rust +```rust filename="instruction.rs" pub enum MovieInstruction { AddMovieReview { title: String, @@ -651,40 +663,44 @@ pub enum MovieInstruction { ``` Be sure to add it to the `match` statement in the `unpack` function in the same -file under the variant `3`. +file under the discriminator `3`. -```rust +```rust filename="instruction.rs" impl MovieInstruction { pub fn unpack(input: &[u8]) -> Result { - let (&variant, rest) = input + let (&discriminator, rest) = input .split_first() .ok_or(ProgramError::InvalidInstructionData)?; - Ok(match variant { + + match discriminator { 0 => { - let payload = MovieReviewPayload::try_from_slice(rest).unwrap(); - Self::AddMovieReview { + let payload = MovieReviewPayload::try_from_slice(rest) + .map_err(|_| ProgramError::InvalidInstructionData)?; + Ok(Self::AddMovieReview { title: payload.title, rating: payload.rating, description: payload.description, - } + }) } 1 => { - let payload = MovieReviewPayload::try_from_slice(rest).unwrap(); - Self::UpdateMovieReview { + let payload = MovieReviewPayload::try_from_slice(rest) + .map_err(|_| ProgramError::InvalidInstructionData)?; + Ok(Self::UpdateMovieReview { title: payload.title, rating: payload.rating, description: payload.description, - } + }) } 2 => { - let payload = CommentPayload::try_from_slice(rest).unwrap(); - Self::AddComment { + let payload = CommentPayload::try_from_slice(rest) + .map_err(|_| ProgramError::InvalidInstructionData)?; + Ok(Self::AddComment { comment: payload.comment, - } + }) } - 3 => Self::InitializeMint, + 3 => Ok(Self::InitializeMint), _ => return Err(ProgramError::InvalidInstructionData), - }) + } } } ``` @@ -693,7 +709,7 @@ In the `process_instruction` function in the `processor.rs` file, add the new instruction to the `match` statement and call a function `initialize_token_mint`. 
-```rust +```rust filename="processor.rs" pub fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], @@ -723,9 +739,9 @@ mint account, and then initialize the token mint. We won't explain all of this in detail, but it's worth reading through the code, especially given that the creation and initialization of the token mint both involve CPIs. Again, if you need a refresher on tokens and mints, have a look at the -[Token Program lesson](/content/courses/tokens/token-program). +[Token Program lesson](/content/courses/tokens-and-nfts/token-program.md). -```rust +```rust filename="processor.rs" pub fn initialize_token_mint(program_id: &Pubkey, accounts: &[AccountInfo]) -> ProgramResult { let account_info_iter = &mut accounts.iter(); @@ -797,19 +813,193 @@ pub fn initialize_token_mint(program_id: &Pubkey, accounts: &[AccountInfo]) -> P } ``` -#### 7. Build and deploy +### 7. Build and Deploy Now we're ready to build and deploy our program! You can build the program by -running `cargo build-bpf` and then running the command that is returned, it -should look something like `solana program deploy `. +running `cargo build-sbf`. -Before you can start testing whether or not adding a review or comment sends you -tokens, you need to initialize the program's token mint. You can use -[this script](https://github.com/Unboxed-Software/solana-movie-token-client) to -do that. Once you'd cloned that repository, replace the `PROGRAM_ID` in -`index.ts` with your program's ID. Then run `npm install` and then `npm start`. -The script assumes you're deploying to Devnet. If you're deploying locally, then -make sure to tailor the script accordingly. +```sh +cargo build-sbf +``` + +Then deploy the program by running the `solana program deploy` command. + +```sh +solana program deploy target/deploy/.so +``` + +Upon successful deployment, you'll receive a Program ID. 
For example: + +```sh +Program Id: AzKatnACpNwQxWRs2YyPovsGhgsYVBiTmC3TL4t72eJW +``` + +If you encounter an "insufficient funds" error during deployment, you may need +to add SOL to your deployment wallet. Use the Solana CLI to request an airdrop: + +```sh +solana airdrop 2 +``` + +After receiving the airdrop, attempt the deployment again. + + + +Ensure your Solana CLI is configured for the correct network (`Localnet`, +`devnet`, `testnet`, or `mainnet-beta`) before deploying or requesting airdrops. + + +If you encounter the following error during program deployment, it indicates +that your program size needs to be extended: + +```sh +Error: Deploying program failed: RPC response error -32002: Transaction simulation failed: Error processing Instruction 0: account data too small for instruction [3 log messages ] +``` + +To resolve this, if you're using Solana CLI version 1.18 or later, run the +following command: + +```sh +solana program extend PROGRAM_ID 20000 -u d -k KEYPAIR_FILE_PATH +``` + +Replace `PROGRAM_ID` and `KEYPAIR_FILE_PATH` with your own values. For example: + +```sh + solana program extend HMDRWmYvL2A9xVKZG8iA1ozxi4gMKiHQz9mFkURKrG4 20000 -u d -k ~/.config/solana/id.json +``` + + + +Ensure you are passing the correct Solana's JSON RPC or moniker URL parameter in +the command. + +```bash +-u, --url URL for Solana's JSON RPC or moniker (or their first letter): [mainnet-beta, testnet, devnet, localhost] +``` + + + +Before testing whether adding a review or comment sends tokens, you need to +initialize the program's token mint. + +First, create and initialize an empty NPM project, then change into the project +directory: + +```bash +mkdir movie-token-client +cd movie-token-client +npm init -y +``` + +Install all the required dependencies. 
+ +```bash +npm i @solana/web3.js @solana-developers/helpers@2.5.2 + +npm i --save-dev esrun +``` + +Create a new file named `initialize-review-token-mint.ts`: + +```bash +touch initialize-review-token-mint.ts +``` + +Copy the code below into the newly created file. + +```typescript filename="initialize-review-token-mint.ts" +import { TOKEN_PROGRAM_ID } from "@solana/spl-token"; +import { + Connection, + LAMPORTS_PER_SOL, + PublicKey, + Transaction, + TransactionInstruction, + sendAndConfirmTransaction, + SystemProgram, + SYSVAR_RENT_PUBKEY, +} from "@solana/web3.js"; +import { + initializeKeypair, + airdropIfRequired, + getExplorerLink, +} from "@solana-developers/helpers"; + +const PROGRAM_ID = new PublicKey( + "AzKatnACpNwQxWRs2YyPovsGhgsYVBiTmC3TL4t72eJW", +); + +const LOCALHOST_RPC_URL = "http://localhost:8899"; +const AIRDROP_AMOUNT = 2 * LAMPORTS_PER_SOL; +const MINIMUM_BALANCE_FOR_RENT_EXEMPTION = 1 * LAMPORTS_PER_SOL; + +const connection = new Connection(LOCALHOST_RPC_URL); +const userKeypair = await initializeKeypair(connection); + +await airdropIfRequired( + connection, + userKeypair.publicKey, + AIRDROP_AMOUNT, + MINIMUM_BALANCE_FOR_RENT_EXEMPTION, +); + +const [tokenMintPDA] = PublicKey.findProgramAddressSync( + [Buffer.from("token_mint")], + PROGRAM_ID, +); + +const [tokenAuthPDA] = PublicKey.findProgramAddressSync( + [Buffer.from("token_auth")], + PROGRAM_ID, +); + +const INITIALIZE_MINT_INSTRUCTION = 3; + +const initializeMintInstruction = new TransactionInstruction({ + keys: [ + { pubkey: userKeypair.publicKey, isSigner: true, isWritable: false }, + { pubkey: tokenMintPDA, isSigner: false, isWritable: true }, + { pubkey: tokenAuthPDA, isSigner: false, isWritable: false }, + { pubkey: SystemProgram.programId, isSigner: false, isWritable: false }, + { pubkey: TOKEN_PROGRAM_ID, isSigner: false, isWritable: false }, + { pubkey: SYSVAR_RENT_PUBKEY, isSigner: false, isWritable: false }, + ], + programId: PROGRAM_ID, + data: 
Buffer.from([INITIALIZE_MINT_INSTRUCTION]), +}); + +const transaction = new Transaction().add(initializeMintInstruction); + +try { + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [userKeypair], + ); + const explorerLink = getExplorerLink("transaction", transactionSignature); + + console.log(`Transaction submitted: ${explorerLink}`); +} catch (error) { + if (error instanceof Error) { + throw new Error( + `Failed to initialize program token mint: ${error.message}`, + ); + } else { + throw new Error("An unknown error occurred"); + } +} +``` + +Replace `PROGRAM_ID` in `initialize-review-token-mint.ts` with your program ID. +Then run the file with: + +```bash +npx esrun initialize-review-token-mint.ts +``` + +Your token mint will now be initialized. The script assumes you're deploying to +localnet. If you're deploying to devnet, update the script accordingly. Once you've initialized your token mint, you can use the [Movie Review frontend](https://github.com/Unboxed-Software/solana-movie-frontend/tree/solution-add-tokens) @@ -821,9 +1011,8 @@ add a comment, you should receive 5 tokens. They won't have a fancy name or image since we didn't add any metadata to the token, but you get the idea. If you need more time with the concepts from this lesson or got stuck along the -way, feel free to -[take a look at the solution code](https://github.com/Unboxed-Software/solana-movie-program/tree/solution-add-tokens). -Note that the solution to this lab is on the `solution-add-tokens` branch. +way, feel free to take a look at the +[solution code in `solution-add-tokens` branch](https://github.com/solana-developers/movie-program/tree/solution-add-tokens). ## Challenge @@ -841,6 +1030,7 @@ that is possible and you now have the skills and knowledge to go and build something like it on your own! 
+ Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=ade5d386-809f-42c2-80eb-a6c04c471f53)! diff --git a/content/courses/native-onchain-development/metadata.yml b/content/courses/native-onchain-development/metadata.yml index 95dfd5b64..9d0a23347 100644 --- a/content/courses/native-onchain-development/metadata.yml +++ b/content/courses/native-onchain-development/metadata.yml @@ -12,6 +12,3 @@ lessons: - deserialize-custom-data-frontend - paging-ordering-filtering-data-frontend priority: 50 -# Uses out of date repos -# TODO: Superteam to update -isHidden: true diff --git a/content/courses/native-onchain-development/program-security.md b/content/courses/native-onchain-development/program-security.md index 1bbba4de8..61872f316 100644 --- a/content/courses/native-onchain-development/program-security.md +++ b/content/courses/native-onchain-development/program-security.md @@ -1,119 +1,125 @@ --- title: Create a Basic Program, Part 3 - Basic Security and Validation objectives: - - Explain the importance of "thinking like an attacker" - - Understand basic security practices - - Perform owner checks - - Perform signer checks - - Validate accounts passed into the program - - Perform basic data validation -description: "How to implement account checks and validate instruction data." + - Understand why "thinking like an attacker" is essential in securing Solana + programs. + - Learn and implement core security practices to protect your program. + - Perform owner and signer checks to verify account ownership and transaction + authenticity. + - Validate the accounts passed into your program to ensure they are what you + expect. + - Conduct basic data validation to prevent invalid or malicious input from + compromising your program. +description: + "Learn how to secure your Solana program with ownership, signer, and account + validation checks." 
--- ## Summary -- **Thinking like an attacker** means asking "How do I break this?" -- Perform **owner checks** to ensure that the provided account is owned by the - public key you expect, e.g. ensuring that an account you expect to be a PDA is - owned by `program_id` -- Perform **signer checks** to ensure that any account modification has been - signed by the right party or parties -- **Account validation** entails ensuring that provided accounts are the - accounts you expect them to be, e.g. deriving PDAs with the expected seeds to - make sure the address matches the provided account -- **Data validation** entails ensuring that any provided data meets the criteria - required by the program +- **Thinking like an attacker** is about shifting your mindset to proactively + identify potential security gaps by asking, "How do I break this?" +- **Owner checks** ensure that an account is controlled by the expected public + key, such as verifying that a PDA (Program Derived Address) is owned by the + program. +- **Signer checks** confirm that the right parties have signed the transaction, + allowing for safe modifications to accounts. +- **Account validation** is used to ensure that the accounts passed into your + program match your expectations, like checking the correctness of a PDA's + derivation. +- **Data validation** verifies that the instruction data provided to your + program adheres to specific rules or constraints, ensuring it doesn't lead to + unintended behavior. ## Lesson -In the last two lessons we worked through building a Movie Review program -together. The end result is pretty cool! It's exciting to get something working -in a new development environment. - -Proper program development, however, doesn't end at "get it working." It's -important to think through the possible failure points in your code to mitigate -them. Failure points are where undesirable behavior in your code could -potentially occur. 
Whether the undesirable behavior happens due to users -interacting with your program in unexpected ways or bad actors intentionally -trying to exploit your program, anticipating failure points is essential to -secure program development. +In the previous lessons +[deserialize instruction data](/content/courses/native-onchain-development/deserialize-instruction-data.md) +and +[program state management](/content/courses/native-onchain-development/program-state-management.md), +we built a Movie Review program, and while getting it to function was exciting, +secure development doesn't stop at "just working." It's critical to understand +potential failure points and take proactive steps to secure your program against +both accidental misuse and intentional exploitation. Remember, **you have no control over the transactions that will be sent to your program once it's deployed**. You can only control how your program handles them. While this lesson is far from a comprehensive overview of program security, we'll cover some of the basic pitfalls to look out for. -### Think like an attacker - -[Neodyme](https://workshop.neodyme.io/) gave a presentation at Breakpoint 2021 -entitled "Think Like An Attacker: Bringing Smart Contracts to Their Break(ing) -Point." If there's one thing you take away from this lesson, it's that you -should think like an attacker. - -In this lesson, of course, we cannot cover everything that could possibly go -wrong with your programs. Ultimately, every program will have different security -risks associated with it. While understanding common pitfalls is _essential_ to -engineering good programs, it is _insufficient_ for deploying secure ones. To -have the broadest security coverage possible, you have to approach your code -with the right mindset. +### Think Like an Attacker -As Neodyme mentioned in their presentation, the right mindset requires moving -from the question "Is this broken?" to "How do I break this?" 
This is the first -and most essential step in understanding what your code _actually does_ as -opposed to what you wrote it to do. +A fundamental principle in secure programming is adopting an "attacker's +mindset." This means considering every possible angle someone might use to break +or exploit your program. -#### All programs can be broken +In their presentation at Breakpoint 2021, +[Neodyme](https://workshop.neodyme.io/) emphasized that secure program +development isn't just about identifying when something is broken; it's about +exploring how it can be broken. By asking, "How do I break this?" you shift from +simply testing expected functionality to uncovering potential weaknesses in the +implementation itself. -It's not a question of "if." +All programs, regardless of complexity, can be exploited. The goal isn't to +achieve absolute security (which is impossible) but to make it as difficult as +possible for malicious actors to exploit weaknesses. By adopting this mindset, +you're better prepared to identify and close gaps in your program's security. -Rather, it's a question of "how much effort and dedication would it take." +#### All Programs Can Be Broken -Our job as developers is to close as many holes as possible and increase the -effort and dedication required to break our code. For example, in the Movie -Review program we built together over the last two lessons, we wrote code to -create new accounts to store movie reviews. If we take a closer look at the -code, however, we'll notice how the program also facilitates a lot of -unintentional behavior we could easily catch by asking "How do I break this?" -We'll dig into some of these problems and how to fix them in this lesson, but -remember that memorizing a few pitfalls isn't sufficient. It's up to you to -change your mindset toward security. +Every program has vulnerabilities. The question isn't whether it can be broken, +but how much effort it takes. 
As developers, our goal is to close as many +security gaps as possible and increase the effort required to break our code. +For example, while our Movie Review program creates accounts to store reviews, +there may be unintentional behaviors that could be caught by thinking like an +attacker. In this lesson, we'll explore these issues and how to address them. ### Error handling Before we dive into some of the common security pitfalls and how to avoid them, -it's important to know how to use errors in your program. While your code can -handle some issues gracefully, other issues will require that your program stop -execution and return a program error. +it's important to know how to use errors in your program. Security issues in a +Solana program often require terminating the execution with a meaningful error. +Not all errors are catastrophic, but some should result in stopping the program +and returning an appropriate error code to prevent further processing. -#### How to create errors +#### Creating Custom Errors -While the `solana_program` crate provides a `ProgramError` enum with a list of -generic errors we can use, it will often be useful to create your own. Your -custom errors will be able to provide more context and detail while you're -debugging your code. +Solana's +[`solana_program`](https://docs.rs/solana-program/latest/solana_program/) crate +provides a generic +[`ProgramError`](https://docs.rs/solana-program/latest/solana_program/program_error/enum.ProgramError.html) +enum for error handling. However, custom errors allow you to provide more +detailed, context-specific information that helps during debugging and testing. We can define our own errors by creating an enum type listing the errors we want to use. For example, the `NoteError` contains variants `Forbidden` and `InvalidLength`. The enum is made into a Rust `Error` type by using the `derive` -attribute macro to implement the `Error` trait from the `thiserror` library.
-Each error type also has its own `#[error("...")]` notation. This lets you -provide an error message for each particular error type. +attribute macro to implement the `Error` trait from the +[`thiserror`](https://docs.rs/thiserror/latest/thiserror/) library. Each error +type also has its own `#[error("...")]` notation. This lets you provide an error +message for each particular error type. + +Here's an example of how you can define custom errors in your program: ```rust -use solana_program::{program_error::ProgramError}; +use solana_program::program_error::ProgramError; use thiserror::Error; -#[derive(Error)] +#[derive(Error, Debug)] pub enum NoteError { - #[error("Wrong note owner")] + #[error("Unauthorized access - You don't own this note.")] Forbidden, - #[error("Text is too long")] + #[error("Invalid note length - The text exceeds the allowed limit.")] InvalidLength, } ``` -#### How to return errors +In this example, we create custom errors for unauthorized access and invalid +data input (such as note length). Defining custom errors gives us greater +flexibility when debugging or explaining what went wrong during execution. + +#### Returning Errors The compiler expects errors returned by the program to be of type `ProgramError` from the `solana_program` crate. That means we won't be able to return our @@ -138,54 +144,66 @@ if pda != *note_pda.key { } ``` -### Basic security checks +This ensures the program gracefully handles errors and provides meaningful +feedback when things go wrong. 
-While these won't comprehensively secure your program, there are a few security -checks you can keep in mind to fill in some of the larger gaps in your code: +### Basic Security Checks -- Ownership checks - used to verify that an account is owned by the program -- Signer checks - used to verify that an account has signed a transaction -- General Account Validation - used to verify that an account is the expected - account -- Data Validation - used to verify the inputs provided by a user +To ensure your Solana program is resilient against common vulnerabilities, you +should incorporate key security checks. These are critical for detecting invalid +accounts or unauthorized transactions and preventing undesired behavior. #### Ownership checks -An ownership check verifies that an account is owned by the expected public key. -Let's use the note-taking app example that we've referenced in previous lessons. -In this app, users can create, update, and delete notes that are stored by the -program in PDA accounts. - -When a user invokes the `update` instruction, they also provide a `pda_account`. -We presume the provided `pda_account` is for the particular note they want to -update, but the user can input any instruction data they want. They could even -potentially send data which matches the data format of a note account but was -not also created by the note-taking program. This security vulnerability is one -potential way to introduce malicious code. +An ownership check verifies that an account is owned by the expected program. +For instance, if your program relies on PDAs (Program Derived Addresses), you +want to ensure that those PDAs are controlled by your program and not by an +external party. 
+ +Let's use the note-taking app example that we've referenced in the +[deserialize instruction data](/content/courses/native-onchain-development/deserialize-instruction-data.md) +and +[program state management](/content/courses/native-onchain-development/program-state-management.md) +lessons. In this app, users can create, update, and delete notes that are stored +by the program in PDA accounts. + +When a user invokes the `update` instruction handler, they also provide a +`pda_account`. We presume the provided `pda_account` is for the particular note +they want to update, but the user can input any instruction data they want. They +could even potentially send data that matches the data format of a note account +but was not also created by the note-taking program. This security vulnerability +is one potential way to introduce malicious code. The simplest way to avoid this problem is to always check that the owner of an account is the public key you expect it to be. In this case, we expect the note account to be a PDA account owned by the program itself. When this is not the case, we can report it as an error accordingly. +Here's how you can perform an ownership check to verify that an account is owned +by the program: + ```rust if note_pda.owner != program_id { return Err(ProgramError::InvalidNoteAccount); } ``` -As a side note, using PDAs whenever possible is more secure than trusting -externally-owned accounts, even if they are owned by the transaction signer. The -only accounts that the program has complete control over are PDA accounts, -making them the most secure. +In this example, we check if the `note_pda` is owned by the program itself +(denoted by `program_id`). Ownership checks like these prevent unauthorized +entities from tampering with critical accounts. + + + +PDAs are often considered to be trusted stores of a program's state. Ensuring +the correct program owns the PDAs is a fundamental way to prevent malicious +behavior. 
-#### Signer checks +#### Signer Checks -A signer check simply verifies that the right parties have signed a transaction. -In the note-taking app, for example, we would want to verify that the note -creator signed the transaction before we process the `update` instruction. -Otherwise, anyone can update another user's notes by simply passing in the -user's public key as the initializer. +Signer checks confirm that a transaction has been signed by the correct parties. +In the note-taking app, for example, we want to verify that only the note +creator can update the note. Without this check, anyone could attempt to modify +another user's note by passing in their public key. ```rust if !initializer.is_signer { @@ -194,39 +212,48 @@ if !initializer.is_signer { } ``` -#### General account validation +By verifying that the initializer has signed the transaction, we ensure that +only the legitimate owner of the account can perform actions on it. -In addition to checking the signers and owners of accounts, it's important to -ensure that the provided accounts are what your code expects them to be. For -example, you would want to validate that a provided PDA account's address can be -derived with the expected seeds. This ensures that it is the account you expect -it to be. +#### Account Validation + +Account validation checks that the accounts passed into the program are correct +and valid. This is often done by deriving the expected account using known seeds +(for PDAs) and comparing it to the passed account. -In the note-taking app example, that would mean ensuring that you can derive a -matching PDA using the note creator's public key and the ID as seeds (that's -what we're assuming was used when creating the note). That way a user couldn't -accidentally pass in a PDA account for the wrong note or, more importantly, that -the user isn't passing in a PDA account that represents somebody else's note -entirely. 
+For instance, in the note-taking app, you can derive the expected PDA using the +creator's public key and note ID, and then validate that it matches the provided +account: ```rust -let (pda, bump_seed) = Pubkey::find_program_address(&[note_creator.key.as_ref(), id.as_bytes().as_ref(),], program_id); +let (expected_pda, bump_seed) = Pubkey::find_program_address( + &[ + note_creator.key.as_ref(), + id.as_bytes().as_ref(), + ], + program_id +); -if pda != *note_pda.key { +if expected_pda != *note_pda.key { msg!("Invalid seeds for PDA"); return Err(ProgramError::InvalidArgument) } ``` -### Data validation +This check prevents a user from accidentally (or maliciously) passing the wrong +PDA or one that belongs to someone else. By validating the PDA's derivation, you +ensure the program is acting on the correct account. -Similar to validating accounts, you should also validate any data provided by -the client. +### Data Validation -For example, you may have a game program where a user can allocate character -attribute points to various categories. You may have a maximum limit in each -category of 100, in which case you would want to verify that the existing -allocation of points plus the new allocation doesn't exceed the maximum. +Data validation ensures that the input provided to your program meets the +expected criteria. This is crucial for avoiding incorrect or malicious data that +could cause the program to behave unpredictably. + +For example, let's say your program allows users to allocate points to a +character's attributes, but each attribute has a maximum allowed value. Before +making any updates, you should check that the new allocation does not exceed the +defined limit: ```rust if character.agility + new_agility > 100 { @@ -235,8 +262,8 @@ if character.agility + new_agility > 100 { } ``` -Or, the character may have an allowance of attribute points they can allocate -and you want to make sure they don't exceed that allowance. 
+Similarly, you should check that the user is not exceeding their allowed number +of points: ```rust if attribute_allowance < new_agility { @@ -245,10 +272,9 @@ if attribute_allowance < new_agility { } ``` -Without these checks, program behavior would differ from what you expect. In -some cases, however, it's more than just an issue of undefined behavior. -Sometimes failure to validate data can result in security loopholes that are -financially devastating. +Without these validations, the program could end up in an undefined state or be +exploited by malicious actors, potentially causing financial loss or +inconsistent behavior. For example, imagine that the character referenced in these examples is an NFT. Further, imagine that the program allows the NFT to be staked to earn token @@ -260,45 +286,50 @@ stakers. #### Integer overflow and underflow -Rust integers have fixed sizes. This means they can only support a specific -range of numbers. An arithmetic operation that results in a higher or lower -value than what is supported by the range will cause the resulting value to wrap -around. For example, a `u8` only supports numbers 0-255, so the result of -addition that would be 256 would actually be 0, 257 would be 1, etc. +One of the common pitfalls when working with integers in Rust (and in Solana +programs) is handling integer overflow and underflow. Rust integers have fixed +sizes and can only hold values within a certain range. When a value exceeds that +range, it wraps around, leading to unexpected results. -This is always important to keep in mind, but especially so when dealing with -any code that represents true value, such as depositing and withdrawing tokens. +For example, with a `u8` (which holds values between 0 and 255), adding 1 to 255 +results in a value of 0 (overflow). 
To avoid this, you should use checked math +functions like +[`checked_add()`](https://doc.rust-lang.org/std/primitive.u8.html#method.checked_add) +and +[`checked_sub()`](https://doc.rust-lang.org/std/primitive.u8.html#method.checked_sub): To avoid integer overflow and underflow, either: 1. Have logic in place that ensures overflow or underflow _cannot_ happen or -2. Use checked math like `checked_add` instead of `+` +2. Use checked math like `checked_add()` instead of `+` + ```rust let first_int: u8 = 5; let second_int: u8 = 255; - let sum = first_int.checked_add(second_int); + let sum = first_int.checked_add(second_int) + .ok_or(ProgramError::ArithmeticOverflow)?; ``` ## Lab -Let's practice together with the Movie Review program we've worked on in -previous lessons. No worries if you're just jumping into this lesson without -having done the previous lesson - it should be possible to follow along either -way. +In this lab, we will build upon the Movie Review program that allows users to +store movie reviews in PDA accounts. If you haven't completed the previous +lessons +[deserialize instruction data](/content/courses/native-onchain-development/deserialize-instruction-data.md) +and +[program state management](/content/courses/native-onchain-development/program-state-management.md), +don't worry—this guide is self-contained. -As a refresher, the Movie Review program lets users store movie reviews in PDA -accounts. Last lesson, we finished implementing the basic functionality of -adding a movie review. Now, we'll add some security checks to the functionality -we've already created and add the ability to update a movie review in a secure -manner. - -Just as before, we'll be using [Solana Playground](https://beta.solpg.io/) to -write, build, and deploy our code. +The Movie Review program lets users add and update reviews in PDA accounts. In +previous lessons, we implemented basic functionality for adding reviews. 
Now, +we'll add security checks and implement an update feature in a secure manner. +We'll use [Solana Playground](https://beta.solpg.io/) to write, build, and +deploy our program. ### 1. Get the starter code To begin, you can find -[the movie review starter code](https://beta.solpg.io/62b552f3f6273245aca4f5c9). +[the movie review starter code](https://beta.solpg.io/62b552f3f6273245aca4f5c9). If you've been following along with the Movie Review labs, you'll notice that we've refactored our program. @@ -317,12 +348,12 @@ defining custom errors. The complete file structure is as follows: - **state.rs -** serialize and deserialize state - **error.rs -** custom program errors -In addition to some changes to file structure, we've updated a small amount of -code that will let this lab be more focused on security without having you write -unnecessary boiler plate. +In addition to some changes to the file structure, we've updated a small amount +of code that will let this lab be more focused on security without having you +write unnecessary boilerplate. Since we'll be allowing updates to movie reviews, we also changed `account_len` -in the `add_movie_review` function (now in `processor.rs`). Instead of +in the `add_movie_review()` function (now in `processor.rs`). Instead of calculating the size of the review and setting the account length to only as large as it needs to be, we're simply going to allocate 1000 bytes to each review account. This way, we don't have to worry about reallocating size or @@ -356,8 +387,7 @@ that checks the `is_initialized` field on the `MovieAccountState` struct. `MovieAccountState` has a known size and provides for some compiler optimizations. 
-```rust -// inside state.rs +```rust filename="state.rs" impl Sealed for MovieAccountState {} impl IsInitialized for MovieAccountState { @@ -367,27 +397,21 @@ impl IsInitialized for MovieAccountState { } ``` -Before moving on, make sure you have a solid grasp on the current state of the +Before moving on, make sure you have a solid grasp of the current state of the program. Look through the code and spend some time thinking through any spots that are confusing to you. It may be helpful to compare the starter code to the [solution code from the previous lesson](https://beta.solpg.io/62b23597f6273245aca4f5b4). ### 2. Custom Errors -Let's begin by writing our custom program errors. We'll need errors that we can -use in the following situations: - -- The update instruction has been invoked on an account that hasn't been - initialized yet -- The provided PDA doesn't match the expected or derived PDA -- The input data is larger than the program allows -- The rating provided does not fall in the 1-5 range +We'll define custom errors to handle cases like uninitialized accounts, invalid +PDA matches, exceeding data limits, and invalid ratings (ratings must be between +1 and 5). These errors will be added to the `error.rs` file: The starter code includes an empty `error.rs` file. Open that file and add errors for each of the above cases. -```rust -// inside error.rs +```rust filename="error.rs" use solana_program::{program_error::ProgramError}; use thiserror::Error; @@ -414,19 +438,16 @@ impl From for ProgramError { } ``` -Note that in addition to adding the error cases, we also added the -implementation that lets us convert our error into a `ProgramError` type as -needed. +Note that in addition to adding the error cases, we also added an implementation +that lets us convert our error into a `ProgramError` type as needed. -Before moving on, let's bring `ReviewError` into scope in the `processor.rs`. We -will be using these errors shortly when we add our security checks. 
+After adding the errors, import `ReviewError` in `processor.rs` to use them. -```rust -// inside processor.rs +```rust filename="processor.rs" use crate::error::ReviewError; ``` -### 3. Add security checks to `add_movie_review` +### 3. Add Security Checks to add_movie_review Now that we have errors to use, let's implement some security checks to our `add_movie_review` function. @@ -438,7 +459,7 @@ also a signer on the transaction. This ensures that you can't submit movie reviews impersonating somebody else. We'll put this check right after iterating through the accounts. -```rust +```rust filename="processor.rs" let account_info_iter = &mut accounts.iter(); let initializer = next_account_info(account_info_iter)?; @@ -455,11 +476,11 @@ if !initializer.is_signer { Next, let's make sure the `pda_account` passed in by the user is the `pda` we expect. Recall we derived the `pda` for a movie review using the `initializer` -and `title` as seeds. Within our instruction we'll derive the `pda` again and +and `title` as seeds. Within our instruction, we'll derive the `pda` again and then check if it matches the `pda_account`. If the addresses do not match, we'll return our custom `InvalidPDA` error. -```rust +```rust filename="processor.rs" // Derive PDA and check that it matches client let (pda, _bump_seed) = Pubkey::find_program_address(&[initializer.key.as_ref(), account_data.title.as_bytes().as_ref(),], program_id); @@ -477,7 +498,7 @@ We'll start by making sure `rating` falls within the 1 to 5 scale. If the rating provided by the user outside of this range, we'll return our custom `InvalidRating` error. -```rust +```rust filename="processor.rs" if rating > 5 || rating < 1 { msg!("Rating cannot be higher than 5"); return Err(ReviewError::InvalidRating.into()) @@ -488,7 +509,7 @@ Next, let's check that the content of the review does not exceed the 1000 bytes we've allocated for the account. If the size exceeds 1000 bytes, we'll return our custom `InvalidDataLength` error. 
-```rust +```rust filename="processor.rs" let total_len: usize = 1 + 1 + (4 + title.len()) + (4 + description.len()); if total_len > 1000 { msg!("Data length is larger than 1000 bytes"); @@ -496,20 +517,20 @@ if total_len > 1000 { } ``` -Lastly, let's checking if the account has already been initialized by calling -the `is_initialized` function we implemented for our `MovieAccountState`. If the +Lastly, let's check if the account has already been initialized by calling the +`is_initialized` function we implemented for our `MovieAccountState`. If the account already exists, then we will return an error. -```rust +```rust filename="processor.rs" if account_data.is_initialized() { msg!("Account already initialized"); return Err(ProgramError::AccountAlreadyInitialized); } ``` -All together, the `add_movie_review` function should look something like this: +Altogether, the `add_movie_review()` function should look something like this: -```rust +```rust filename="processor.rs" pub fn add_movie_review( program_id: &Pubkey, accounts: &[AccountInfo], @@ -592,17 +613,12 @@ pub fn add_movie_review( } ``` -### 4. Support movie review updates in `MovieInstruction` - -Now that `add_movie_review` is more secure, let's turn our attention to -supporting the ability to update a movie review. +### 4. Support Movie Review Updates in MovieInstruction -Let's begin by updating `instruction.rs`. We'll start by adding an -`UpdateMovieReview` variant to `MovieInstruction` that includes embedded data -for the new title, rating, and description. +Next, we'll modify `instruction.rs` to add support for updating movie reviews. 
+We'll introduce a new `UpdateMovieReview()` variant in `MovieInstruction`: -```rust -// inside instruction.rs +```rust filename="instruction.rs" pub enum MovieInstruction { AddMovieReview { title: String, @@ -618,13 +634,12 @@ pub enum MovieInstruction { ``` The payload struct can stay the same since aside from the variant type, the -instruction data is the same as what we used for `AddMovieReview`. +instruction data is the same as what we used for `AddMovieReview()`. -Lastly, in the `unpack` function we need to add `UpdateMovieReview` to the match -statement. +We'll also update the `unpack()` function to handle `UpdateMovieReview()`. -```rust -// inside instruction.rs +```rust filename="instruction.rs" +// Inside instruction.rs impl MovieInstruction { pub fn unpack(input: &[u8]) -> Result { let (&variant, rest) = input.split_first().ok_or(ProgramError::InvalidInstructionData)?; @@ -644,38 +659,38 @@ impl MovieInstruction { } ``` -### 5. Define `update_movie_review` function +### 5. Define update_movie_review Function Now that we can unpack our `instruction_data` and determine which instruction of -the program to run, we can add `UpdateMovieReview` to the match statement in -the `process_instruction` function in the `processor.rs` file. +the program to run, we can add `UpdateMovieReview()` to the match statement in +the `process_instruction()` function in the `processor.rs` file. 
-```rust -// inside processor.rs +```rust filename="processor.rs" +// Inside processor.rs pub fn process_instruction( program_id: &Pubkey, accounts: &[AccountInfo], instruction_data: &[u8] ) -> ProgramResult { - // unpack instruction data + // Unpack instruction data let instruction = MovieInstruction::unpack(instruction_data)?; match instruction { MovieInstruction::AddMovieReview { title, rating, description } => { add_movie_review(program_id, accounts, title, rating, description) }, - // add UpdateMovieReview to match against our new data structure + // Add UpdateMovieReview to match against our new data structure MovieInstruction::UpdateMovieReview { title, rating, description } => { - // make call to update function that we'll define next + // Make call to update function that we'll define next update_movie_review(program_id, accounts, title, rating, description) } } } ``` -Next, we can define the new `update_movie_review` function. The definition +Next, we can define the new `update_movie_review()` function. The definition should have the same parameters as the definition of `add_movie_review`. -```rust +```rust filename="processor.rs" pub fn update_movie_review( program_id: &Pubkey, accounts: &[AccountInfo], @@ -687,16 +702,16 @@ pub fn update_movie_review( } ``` -### 6. Implement `update_movie_review` function +### 6. Implement update_movie_review Function All that's left now is to fill in the logic for updating a movie review. Only let's make it secure from the start. -Just like the `add_movie_review` function, let's start by iterating through the -accounts. The only accounts we'll need are the first two: `initializer` and +Just like the `add_movie_review()` function, let's start by iterating through +the accounts. The only accounts we'll need are the first two: `initializer` and `pda_account`. 
-```rust +```rust filename="processor.rs" pub fn update_movie_review( program_id: &Pubkey, accounts: &[AccountInfo], @@ -722,7 +737,7 @@ Before we continue, let's implement some basic security checks. We'll start with an ownership check on for `pda_account` to verify that it is owned by our program. If it isn't, we'll return an `InvalidOwner` error. -```rust +```rust filename="processor.rs" if pda_account.owner != program_id { return Err(ProgramError::InvalidOwner) } @@ -736,7 +751,7 @@ data for a movie review, we want to ensure that the original `initializer` of the review has approved the changes by signing the transaction. If the `initializer` did not sign the transaction, we'll return an error. -```rust +```rust filename="processor.rs" if !initializer.is_signer { msg!("Missing required signature"); return Err(ProgramError::MissingRequiredSignature) @@ -748,9 +763,9 @@ if !initializer.is_signer { Next, let's check that the `pda_account` passed in by the user is the PDA we expect by deriving the PDA using `initializer` and `title` as seeds. If the addresses do not match, we'll return our custom `InvalidPDA` error. We'll -implement this the same way we did in the `add_movie_review` function. +implement this the same way we did in the `add_movie_review()` function. -```rust +```rust filename="processor.rs" // Derive PDA and check that it matches client let (pda, _bump_seed) = Pubkey::find_program_address(&[initializer.key.as_ref(), account_data.title.as_bytes().as_ref(),], program_id); @@ -760,13 +775,13 @@ if pda != *pda_account.key { } ``` -#### Unpack `pda_account` and perform data validation +#### Unpack pda_account and Perform Data Validation Now that our code ensures we can trust the passed in accounts, let's unpack the `pda_account` and perform some data validation. We'll start by unpacking `pda_account` and assigning it to a mutable variable `account_data`. 
-```rust +```rust filename="processor.rs" msg!("unpacking state account"); let mut account_data = try_from_slice_unchecked::(&pda_account.data.borrow()).unwrap(); msg!("borrowed account data"); @@ -785,13 +800,13 @@ if !account_data.is_initialized() { ``` Next, we need to validate the `rating`, `title`, and `description` data just -like in the `add_movie_review` function. We want to limit the `rating` to a +like in the `add_movie_review()` function. We want to limit the `rating` to a scale of 1 to 5 and limit the overall size of the review to be fewer than 1000 -bytes. If the rating provided by the user outside of this range, then we'll +bytes. If the rating provided by the user is outside of this range, then we'll return our custom `InvalidRating` error. If the review is too long, then we'll return our custom `InvalidDataLength` error. -```rust +```rust filename="processor.rs" if rating > 5 || rating < 1 { msg!("Rating cannot be higher than 5"); return Err(ReviewError::InvalidRating.into()) @@ -810,7 +825,7 @@ Now that we've implemented all of the security checks, we can finally update the movie review account by updating `account_data` and re-serializing it. At that point, we can return `Ok` from our program. -```rust +```rust filename="processor.rs" account_data.rating = rating; account_data.description = description; @@ -819,11 +834,11 @@ account_data.serialize(&mut &mut pda_account.data.borrow_mut()[..])?; Ok(()) ``` -All together, the `update_movie_review` function should look something like the -code snippet below. We've included some additional logging for clarity in +All together, the `update_movie_review()` function should look something like +the code snippet below. We've included some additional logging for clarity in debugging. -```rust +```rust filename="processor.rs" pub fn update_movie_review( program_id: &Pubkey, accounts: &[AccountInfo], @@ -900,7 +915,7 @@ pub fn update_movie_review( We're ready to build and upgrade our program! 
You can test your program by submitting a transaction with the right instruction data. For that, feel free to use this -[frontend](https://github.com/Unboxed-Software/solana-movie-frontend/tree/solution-update-reviews). +[frontend](https://github.com/solana-developers/movie-frontend/tree/solution-update-reviews). Remember, to make sure you're testing the right program you'll need to replace `MOVIE_REVIEW_PROGRAM_ID` with your program ID in `Form.tsx` and `MovieCoordinator.ts`. @@ -914,7 +929,7 @@ continuing. Now it's your turn to build something independently by building on top of the Student Intro program that you've used in previous lessons. If you haven't been -following along or haven't saved your code from before, feel free to use +following along or haven't saved your code before, feel free to use [this starter code](https://beta.solpg.io/62b11ce4f6273245aca4f5b2). The Student Intro program is a Solana Program that lets students introduce @@ -933,6 +948,7 @@ Note that your code may look slightly different than the solution code depending on the checks you implement and the errors you write. + Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=3dfb98cc-7ba9-463d-8065-7bdb1c841d43)! diff --git a/content/courses/onchain-development/anchor-pdas.md b/content/courses/onchain-development/anchor-pdas.md index c728fe7e9..6df47a41a 100644 --- a/content/courses/onchain-development/anchor-pdas.md +++ b/content/courses/onchain-development/anchor-pdas.md @@ -592,7 +592,7 @@ public key, and the movie review's rating, title, and description. 
```rust #[derive(Accounts)] -#[instruction(title:String, description:String)] +#[instruction(title:String)] pub struct AddMovieReview<'info> { #[account( init, @@ -639,6 +639,16 @@ pub mod anchor_movie_review_program { description: String, rating: u8, ) -> Result<()> { + + // We require that the rating is between 1 and 5 + require!(rating >= MIN_RATING && rating <= MAX_RATING, MovieReviewError::InvalidRating); + + // We require that the title is not longer than 20 characters + require!(title.len() <= MAX_TITLE_LENGTH, MovieReviewError::TitleTooLong); + + // We require that the description is not longer than 50 characters + require!(description.len() <= MAX_DESCRIPTION_LENGTH, MovieReviewError::DescriptionTooLong); + msg!("Movie review account space reallocated"); msg!("Title: {}", title); msg!("Description: {}", description); @@ -668,7 +678,7 @@ We'll also still need the `seeds` and `bump` constraints as we had them in ```rust #[derive(Accounts)] -#[instruction(title:String, description:String)] +#[instruction(title:String)] pub struct UpdateMovieReview<'info> { #[account( mut, diff --git a/content/courses/onchain-development/intro-to-anchor-frontend.md b/content/courses/onchain-development/intro-to-anchor-frontend.md index 7d2a419bb..c4ea5331e 100644 --- a/content/courses/onchain-development/intro-to-anchor-frontend.md +++ b/content/courses/onchain-development/intro-to-anchor-frontend.md @@ -154,11 +154,11 @@ counter program you built previously: Inspecting the IDL, you can see the `programId` and the `metadata` object which have been added in anchor 0.30.0 -This program contains two instructions (`initialize` and `increment`). +This program contains two instruction handlers, `initialize` and `increment`. -Notice that in addition to specifying the instructions, it species the accounts -and inputs for each instruction. 
The `initialize` instruction requires three -accounts: +Notice that in addition to specifying the instruction handlers, it specifies the +accounts and inputs for each instruction. The `initialize` instruction requires +three accounts: 1. `counter` - the new account being initialized in the instruction 2. `user` - the payer for the transaction and initialization @@ -196,7 +196,7 @@ import idl from "./idl.json"; You would _ideally_ also require types for the IDL which would make it easier to interact with the program. The types can be found at `/target/types` folder -after you have build your program. Here are the types for the above IDL which +after you have built your program. Here are the types for the above IDL which when you notice has the exact same structure as the IDL but are just as type helper. diff --git a/content/courses/onchain-development/intro-to-anchor.md b/content/courses/onchain-development/intro-to-anchor.md index efda0cd27..6648bf1ea 100644 --- a/content/courses/onchain-development/intro-to-anchor.md +++ b/content/courses/onchain-development/intro-to-anchor.md @@ -1,7 +1,7 @@ --- title: Intro to Anchor development objectives: - - Use the Anchor framework to build a basic program + - Use the Anchor framework to build a basic Solana program - Describe the basic structure of an Anchor program - Explain how to implement basic account validation and security checks with Anchor @@ -10,8 +10,9 @@ description: "Create your first Solana onchain program in Anchor." ## Summary -- **Programs** on Solana have **instruction handlers** that execute instruction - logic. +- **Programs** on Solana have **instruction handlers**, which are functions that + take arguments from incoming instructions. They are the entry point for any + operation in a program. - **Rust** is the most common language for building Solana programs. 
The **Anchor** framework takes care of common grunt work - like reading data from incoming instructions, and checking the right accounts are provided - so you @@ -19,15 +20,26 @@ description: "Create your first Solana onchain program in Anchor." ## Lesson -Solana's ability to run arbitrary executable code is part of what makes it so -powerful. Solana programs, similar to "smart contracts" in other blockchain -environments, are quite literally the backbone of the Solana ecosystem. And the -collection of programs grows daily as developers and creators dream up and -deploy new programs. +Before we begin, make sure you have Anchor installed. You can follow this lesson +on [local-setup](/content/onchain-development/local-setup.md). + +Solana's capacity to execute arbitrary code is a key part of its power. Solana +programs, (sometimes called "smart contracts"), are the very foundation of the +Solana ecosystem. And as developers and creators continuously conceive and +deploy new programs, the collection of Solana programs continues to expand +daily. + +Every popular Solana exchange, borrow-lend app, digital art auction house, perps +platform, and prediction market is a program. This lesson will give you a basic introduction to writing and deploying a Solana program using the Rust programming language and the Anchor framework. +> This and the further lessons in this course will give a good base to start +> building Solana programs with Anchor, however if you want to get more into +> Anchor, we would recommend checking out the +> [The Anchor Book](https://book.anchor-lang.com/). + ### What is Anchor? Anchor makes writing Solana programs easier, faster, and more secure, making it @@ -38,27 +50,32 @@ with writing a Solana program. ### Anchor program structure -Anchor uses macros and traits to generate boilerplate Rust code for you. These -provide a clear structure to your program so you can more easily reason about -your code. 
The main high-level macros and attributes are: +Anchor uses macros and traits to simplify Rust code for you. These provide a +clear structure to your program so you can focus more on its functionality. -- `declare_id` - a macro for declaring the program's onchain address +Some important macros provided by Anchor are: + +> From here on out, you'll see a lot of Rust. We assume that you are familiar +> with Rust, if not, we recommend you to check out +> [The Rust Book](https://doc.rust-lang.org/book/). + +- `declare_id!` - a macro for declaring the program’s onchain address - `#[program]` - an attribute macro used to denote the module containing the - program's instruction logic + program’s instruction handlers. - `Accounts` - a trait applied to structs representing the list of accounts - required for an instruction + required for an instruction. - `#[account]` - an attribute macro used to define custom account types for the - program + program. Let's talk about each of them before putting all the pieces together. ### Declare your program ID -The `declare_id` macro is used to specify the onchain address of the program -(i.e. the `programId`). When you build an Anchor program for the first time, the -framework will generate a new keypair. This becomes the default keypair used to -deploy the program unless specified otherwise. The corresponding public key -should be used as the `programId` specified in the `declare_id!` macro. +The `declare_id` macro sets the onchain address of the Anchor program (i.e. the +`programId`). When you create a new Anchor program, the framework generates a +default keypair. This keypair is used to deploy the program unless specified +otherwise. The public key of this keypair is used as the `programId` in the +`declare_id!` macro. 
```rust declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); @@ -67,16 +84,16 @@ declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); ### Define instruction logic The `#[program]` attribute macro defines the module containing all of your -program's instructions. This is where you implement the business logic for each -instruction in your program. +program's instruction handlers. This is where you implement the business logic +for each operation in your program. Each public function in the module with the `#[program]` attribute will be -treated as a separate instruction. +treated as a separate instruction handler. -Each instruction function requires a parameter of type `Context` and can -optionally include additional function parameters representing instruction data. -Anchor will automatically handle instruction data deserialization so that you -can work with instruction data as Rust types. +Each instruction handler (function) requires a parameter of type `Context` and +can include more parameters as needed. Anchor will automatically handle +instruction data deserialization so that you can work with instruction data as +Rust types. ```rust #[program] @@ -90,13 +107,20 @@ mod program_module_name { } ``` -#### Instruction `Context` +- The `#[program]` attribute macro is used to denote the module containing the + program’s instruction logic. +- `use super::*;` is used to bring all the items from the parent module into + scope, which are needed to define the instruction logic. +- Next, there is the instruction handler function. This function just writes + some data (`instruction_data` in this case) to an account. + +### Instruction `Context` The `Context` type exposes instruction metadata and accounts to your instruction logic. ```rust -pub struct Context<'a, 'b, 'c, 'info, T> { +pub struct Context<'a, 'b, 'c, 'info, T: Bumps> { /// Currently executing program id. pub program_id: &'a Pubkey, /// Deserialized accounts. 
@@ -107,43 +131,55 @@ pub struct Context<'a, 'b, 'c, 'info, T> { /// Bump seeds found during constraint validation. This is provided as a /// convenience so that handlers don't have to recalculate bump seeds or /// pass them in as arguments. - pub bumps: BTreeMap, + /// Type is the bumps struct generated by #[derive(Accounts)] + pub bumps: T::Bumps, } ``` `Context` is a generic type where `T` defines the list of accounts an -instruction requires. When you use `Context`, you specify the concrete type of -`T` as a struct that adopts the `Accounts` trait (e.g. -`Context`). Through this context argument the -instruction can then access: +instruction handler requires. When you use `Context`, you specify the concrete +type of `T` as a struct that adopts the `Accounts` trait. + +The first argument of every instruction handler must be `Context`. `Context` +takes a generic of your `Accounts` struct, eg, if `AddMovieReview` was the +struct holding the accounts, the context for the `add_movie_review()` function +would be `Context`. + + + Yes, the Accounts struct is typically named the same thing as the instruction handler, just in TitleCase. Eg, the struct with the accounts for add_movie_review() is called AddMovieReview! + + +Through this context argument the instruction can then access: - The accounts passed into the instruction (`ctx.accounts`) - The program ID (`ctx.program_id`) of the executing program - The remaining accounts (`ctx.remaining_accounts`). The `remaining_accounts` is - a vector that contains all accounts that were passed into the instruction but - are not declared in the `Accounts` struct. + a vector that contains all accounts that were passed into the instruction + handler but are not declared in the `Accounts` struct. 
- The bumps for any PDA accounts in the `Accounts` struct (`ctx.bumps`) +- The bumps for any PDA accounts in the `Accounts` struct (`ctx.bumps`) + +> The design of Contexts can be different across different programs to serve +> their purpose; and the name of the context could be anything (not limited to +> Context) to better reflect its usage. This example is to help understand how +> contexts work in Anchor. ### Define instruction accounts -The `Accounts` trait defines a data structure of validated accounts. Structs -adopting this trait define the list of accounts required for a given -instruction. These accounts are then exposed through an instruction's `Context` -so that manual account iteration and deserialization is no longer necessary. +The `Accounts` trait: -You typically apply the `Accounts` trait through the `derive` macro (e.g. -`#[derive(Accounts)]`). This implements an `Accounts` deserializer on the given -struct and removes the need to deserialize each account manually. +- Defines a structure of validated accounts for an instruction handler +- Makes accounts accessible through an instruction handler's `Context` +- Is typically applied using `#[derive(Accounts)]` +- Implements an `Accounts` deserializer on the struct +- Performs constraint checks for secure program execution -Implementations of the `Accounts` trait are responsible for performing all -requisite constraint checks to ensure the accounts meet the conditions required -for the program to run securely. Constraints are provided for each field using -the `#account(..)` attribute (more on that shortly). +Example: -For example, `instruction_one` requires a `Context` argument of type -`InstructionAccounts`. The `#[derive(Accounts)]` macro is used to implement the -`InstructionAccounts` struct which includes three accounts: `account_name`, -`user`, and `system_program`. 
+- `instruction_one` requires a `Context` +- `InstructionAccounts` struct is implemented with `#[derive(Accounts)]` +- It includes accounts like `account_name`, `user`, and `system_program` +- Constraints are specified using the `#account(..)` attribute ```rust #[program] @@ -156,25 +192,30 @@ mod program_module_name { } #[derive(Accounts)] -pub struct InstructionAccounts { - #[account(init, payer = user, space = 8 + 8)] +pub struct InstructionAccounts<'info> { + #[account( + init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE + )] pub account_name: Account<'info, AccountStruct>, + #[account(mut)] pub user: Signer<'info>, - pub system_program: Program<'info, System>, + pub system_program: Program<'info, System>, } ``` When `instruction_one` is invoked, the program: -- Checks that the accounts passed into the instruction match the account types - specified in the `InstructionAccounts` struct +- Checks that the accounts passed into the instruction handler match the account + types specified in the `InstructionAccounts` struct - Checks the accounts against any additional constraints specified -If any accounts passed into `instruction_one` fail the account validation or -security checks specified in the `InstructionAccounts` struct, then the -instruction fails before even reaching the program logic. +> If any accounts passed into `instruction_one` fail the account validation or +> security checks specified in the `InstructionAccounts` struct, then the +> instruction fails before even reaching the program logic. ### Account validation @@ -274,7 +315,11 @@ point be sure to look at the full Recall again the `account_name` field from the `InstructionAccounts` example. 
```rust -#[account(init, payer = user, space = 8 + 8)] +#[account( + init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE +)] pub account_name: Account<'info, AccountStruct>, #[account(mut)] pub user: Signer<'info>, @@ -287,10 +332,14 @@ values: it (sets its account discriminator) - `payer` - specifies the payer for the account initialization to be the `user` account defined in the struct -- `space`- specifies that the space allocated for the account should be `8 + 8` - bytes. The first 8 bytes are for a discriminator that Anchor automatically - adds to identify the account type. The next 8 bytes allocate space for the - data stored on the account as defined in the `AccountStruct` type. +- `space`- the space allocated on the blockchain to store the account. + - `DISCRIMINATOR` is the first 8 bytes of an account, which Anchor uses to + save the type of the account. + - `AccountStruct::INIT_SPACE` is the total size of space required for all the + items in the `AccountStruct`. + - The very need of using this `space` constraint can be eliminated by using + `#[derive(InitSpace)]` macro. We'll see how to use that further in this + lesson. For `user` we use the `#[account(..)]` attribute to specify that the given account is mutable. The `user` account must be marked as mutable because @@ -344,15 +393,21 @@ As an example, let's look at `AccountStruct` used by the `account_name` of ```rust #[derive(Accounts)] pub struct InstructionAccounts { - #[account(init, payer = user, space = 8 + 8)] + #[account(init, + payer = user, + space = DISCRIMINATOR + AnchorStruct::INIT_SPACE + )] pub account_name: Account<'info, AccountStruct>, ... 
} #[account] +#[derive(InitSpace)] pub struct AccountStruct { data: u64 } + +const DISCRIMINATOR: usize = 8; ``` The `#[account]` attribute ensures that it can be used as an account in @@ -360,10 +415,14 @@ The `#[account]` attribute ensures that it can be used as an account in When the `account_name` account is initialized: -- The first 8 bytes is set as the `AccountStruct` discriminator +- The first 8 bytes is set as the `AccountStruct` discriminator using the + `DISCRIMINATOR` constant. - The data field of the account will match `AccountStruct` - The account owner is set as the `programId` from `declare_id` +> It is considered a good practice to use the `#[derive(InitSpace)]` macro which +> makes the code more readable and maintainable. + ### Bring it all together When you combine all of these Anchor types you end up with a complete program. @@ -393,21 +452,37 @@ mod program_module_name { // Validate incoming accounts for instructions #[derive(Accounts)] pub struct InstructionAccounts<'info> { - #[account(init, payer = user, space = 8 + 8)] + #[account(init, + payer = user, + space = DISCRIMINATOR + AccountStruct::INIT_SPACE + )] pub account_name: Account<'info, AccountStruct>, #[account(mut)] pub user: Signer<'info>, pub system_program: Program<'info, System>, - } // Define custom program account type #[account] +#[derive(InitSpace)] pub struct AccountStruct { data: u64 } + +const DISCRIMINATOR: usize = 8; ``` +#### Key takeaways: + +- The whole program structure can be broadly divided into three parts: + 1. Account constraints: define the accounts required for the instructions, as + well as rules to apply to them - e.g., whether they need to sign the + transaction, if they should be created on demand, how addresses are derived + for PDAs, etc. + 2. Instruction handlers: implement program logic, as functions inside + the `#[program]` module. + 3. Accounts: define the format used for data accounts. 
+ You are now ready to build your own Solana program using the Anchor framework! ## Lab @@ -444,7 +519,7 @@ Open the file `lib.rs` and look at `declare_id!`: declare_id!("BouTUP7a3MZLtXqMAm1NrkJSKwAjmid8abqiNjUyBJSr"); ``` -Run `anchor keys sync` +and then run... ```shell anchor keys sync @@ -473,12 +548,11 @@ following: ```rust use anchor_lang::prelude::*; -declare_id!("your-private-key"); +declare_id!("onchain-program-address"); #[program] pub mod anchor_counter { use super::*; - } ``` @@ -489,13 +563,17 @@ type. The `Counter` struct defines one `count` field of type `u64`. This means that we can expect any new accounts initialized as a `Counter` type to have a matching data structure. The `#[account]` attribute also automatically sets the discriminator for a new account and sets the owner of the account as the -`programId` from the `declare_id!` macro. +`programId` from the `declare_id!` macro. We also use the `#[derive(InitSpace)]` +macro for convenient space allocation. ```rust #[account] +#[derive(InitSpace)] pub struct Counter { pub count: u64, } + +const DISCRIMINATOR: usize = 8; ``` #### 3. Implement `Context` type `Initialize` @@ -512,7 +590,10 @@ It'll need the following accounts: ```rust #[derive(Accounts)] pub struct Initialize<'info> { - #[account(init, payer = user, space = 8 + 8)] + #[account(init, + payer = user, + space = DISCRIMINATOR + Counter::INIT_SPACE + )] pub counter: Account<'info, Counter>, #[account(mut)] pub user: Signer<'info>, @@ -520,13 +601,13 @@ pub struct Initialize<'info> { } ``` -#### 4. Add the `initialize` instruction +#### 4. Add the `initialize` instruction handler Now that we have our `Counter` account and `Initialize` type , let's implement -the `initialize` instruction within `#[program]`. This instruction requires a -`Context` of type `Initialize` and takes no additional instruction data. In the -instruction logic, we are simply setting the `counter` account's `count` field -to `0`. 
+the `initialize` instruction handler within `#[program]`. This instruction +handler requires a `Context` of type `Initialize` and takes no additional +instruction data. In the instruction logic, we are simply setting the `counter` +account's `count` field to `0`. ```rust pub fn initialize(ctx: Context) -> Result<()> { @@ -541,8 +622,8 @@ pub fn initialize(ctx: Context) -> Result<()> { #### 5. Implement `Context` type `Update` Now, using the `#[derive(Accounts)]` macro again, let's create the `Update` type -that lists the accounts that the `increment` instruction requires. It'll need -the following accounts: +that lists the accounts that the `increment` instruction handler requires. It'll +need the following accounts: - `counter` - an existing counter account to increment - `user` - payer for the transaction fee @@ -559,14 +640,14 @@ pub struct Update<'info> { } ``` -#### 6. Add `increment` instruction +#### 6. Add `increment` instruction handler -Lastly, within `#[program]`, let's implement an `increment` instruction to -increment the `count` once a `counter` account is initialized by the first -instruction. This instruction requires a `Context` of type `Update` (implemented -in the next step) and takes no additional instruction data. In the instruction -logic, we are simply incrementing an existing `counter` account's `count` field -by `1`. +Lastly, within `#[program]`, let's implement an `increment` instruction handler +to increment the `count` once a `counter` account is initialized by the first +instruction handler. This instruction handler requires a `Context` of type +`Update` (implemented in the previous step) and takes no additional instruction +data. In the instruction logic, we are simply incrementing an existing `counter` +account's `count` field by `1`. 
```rust pub fn increment(ctx: Context) -> Result<()> { @@ -609,7 +690,10 @@ pub mod anchor_counter { #[derive(Accounts)] pub struct Initialize<'info> { - #[account(init, payer = user, space = 8 + 8)] + #[account(init, + payer = user, + space = DISCRIMINATOR + Counter::INIT_SPACE + )] pub counter: Account<'info, Counter>, #[account(mut)] pub user: Signer<'info>, @@ -624,9 +708,12 @@ pub struct Update<'info> { } #[account] +#[derive(InitSpace)] pub struct Counter { pub count: u64, } + +const DISCRIMINATOR: usize = 8; ``` Run `anchor build` to build the program. diff --git a/content/courses/program-optimization/lookup-tables.md b/content/courses/program-optimization/lookup-tables.md index fac7b6682..6576eb3d5 100644 --- a/content/courses/program-optimization/lookup-tables.md +++ b/content/courses/program-optimization/lookup-tables.md @@ -10,36 +10,33 @@ description: "Use large amounts of accounts by using lookup tables." ## Summary -- **Versioned Transactions** refers to a way to support both legacy versions and - newer versions of transaction formats. The original transaction format is - "legacy" and new transaction versions start at version 0. Versioned - transactions were implemented to support the use of Address Lookup Tables - (also called lookup tables or LUTs). -- **Address Lookup Tables** are accounts used to store addresses of other - accounts, which can then be referenced in versioned transactions using a 1 - byte index instead of the full 32 bytes per address. This enables the creation - of more complex transactions than what was possible prior to the introduction - of LUTs. +- **Versioned Transactions** in Solana allows support for both legacy and newer + transaction formats. The original format is referred to as "legacy," while new + formats begin at version 0. Versioned transactions were introduced to + accommodate the use of Address Lookup Tables (LUTs). +- **Address Lookup Tables** are special accounts that store the addresses of + other accounts. 
In versioned transactions, these addresses can be referenced + by a 1-byte index instead of the full 32-byte address. This optimization + enables more complex transactions than previously possible. ## Lesson By design, Solana transactions are limited to 1232 bytes. Transactions exceeding -this size will fail. While this enables a number of network optimizations, it -can also limit the types of atomic operations that can be performed on the -network. +this limit will fail, which restricts the size of atomic operations that can be +performed. While this limit allows for optimizations at the network level, it +imposes restrictions on transaction complexity. -To help get around the transaction size limitation, Solana released a new -transaction format that allows support for multiple versions of transaction -formats. At the time of writing, Solana supports two transaction versions: +To address transaction size limitations, Solana introduced a new transaction +format supporting multiple versions. Currently, two transaction versions are +supported: -1. `legacy` - the original transaction format -2. `0` - the newest transaction format that includes support for Address Lookup - Tables +1. `legacy` - The original transaction format +2. `0` - The latest format, which supports Address Lookup Tables. -Versioned transactions don't require any modifications to existing Solana -programs, but any client-side code created prior to the release of versioned -transactions should be updated. In this lesson, we'll cover the basics of -versioned transactions and how to use them, including: +Existing Solana programs do not require changes to support versioned +transactions. However, client-side code created prior to their introduction +should be updated. 
In this lesson, we'll cover the basics of versioned +transactions and how to use them, including: - Creating versioned transactions - Creating and managing lookup tables @@ -47,125 +44,114 @@ versioned transactions and how to use them, including: ### Versioned Transactions -One of the items taking up the most space in Solana transactions is the -inclusion of full account addresses. At 32 bytes each, 39 accounts will render a -transaction too large. That's not even accounting for instruction data. In -practice, most transactions will be too large with around 20 accounts. +In Solana transactions, one of the largest space consumers is account addresses, +which are 32 bytes each. For transactions with 39 accounts, the size limit is +exceeded even before accounting for instruction data. Typically, transactions +become too large with around 20 accounts. -Solana released versioned transactions to support multiple transaction formats. -Alongside the release of versioned transactions, Solana released version 0 of -transactions to support Address Lookup Tables. Lookup tables are separate -accounts that store account addresses and then allow them to be referenced in a -transaction using a 1 byte index. This significantly decreases the size of a -transaction since each included account now only needs to use 1 byte instead of -32 bytes. +Versioned transactions address this issue by introducing Address Lookup Tables, +which allow addresses to be stored separately and referenced via a 1-byte index. +This greatly reduces transaction size by minimizing the space needed for account +addresses. -Even if you don't need to use lookup tables, you'll need to know how to support -versioned transactions in your client-side code. Fortunately, everything you -need to work with versioned transactions and lookup tables is included in the -`@solana/web3.js` library. 
+Even if Address Lookup Tables are not required for your use case, understanding +versioned transactions is crucial for maintaining compatibility with the latest +Solana features. The `@solana/web3.js` library provides all necessary tools to +work with versioned transactions and lookup tables. #### Create versioned transactions -To create a versioned transaction, you simply create a `TransactionMessage` with +To create a versioned transaction, you first create a `TransactionMessage` with the following parameters: - `payerKey` - the public key of the account that will pay for the transaction - `recentBlockhash` - a recent blockhash from the network -- `instructions` - the instructions to include in the transaction +- `instructions` - the instructions to be executed in the transaction. -You then transform this message object into a version `0` transaction using the -`compileToV0Message()` method. +Once the message object is created, you can convert it into a version `0` +transaction using the `compileToV0Message()` method. 
```typescript import * as web3 from "@solana/web3.js"; // Example transfer instruction -const transferInstruction = [ - web3.SystemProgram.transfer({ - fromPubkey: payer.publicKey, // Public key of account that will send the funds - toPubkey: toAccount.publicKey, // Public key of the account that will receive the funds - lamports: 1 * LAMPORTS_PER_SOL, // Amount of lamports to be transferred - }), -]; +const transferInstruction = SystemProgram.transfer({ + fromPubkey: payer.publicKey, // Public key of the sender account + toPubkey: toAccount.publicKey, // Public key of the receiver account + lamports: 1 * LAMPORTS_PER_SOL, // Amount to transfer in lamports +}); // Get the latest blockhash -let { blockhash } = await connection.getLatestBlockhash(); +const { blockhash } = await connection.getLatestBlockhash(); // Create the transaction message -const message = new web3.TransactionMessage({ - payerKey: payer.publicKey, // Public key of the account that will pay for the transaction - recentBlockhash: blockhash, // Latest blockhash - instructions: transferInstruction, // Instructions included in transaction +const message = new TransactionMessage({ + payerKey: payer.publicKey, // Public key of the payer account + recentBlockhash: blockhash, // Most recent blockhash + instructions: [transferInstruction], // Transaction instructions }).compileToV0Message(); ``` -Finally, you pass the compiled message into the `VersionedTransaction` -constructor to create a new versioned transaction. Your code can then sign and -send the transaction to the network, similar to a legacy transaction. +Next, pass the compiled message into the `VersionedTransaction` constructor to +create a versioned transaction. The transaction is then signed and sent to the +network, similar to how legacy transactions are handled. 
```typescript -// Create the versioned transaction using the message -const transaction = new web3.VersionedTransaction(message); +// Create the versioned transaction from the compiled message +const transaction = new VersionedTransaction(message); -// Sign the transaction +// Sign the transaction with the payer's keypair transaction.sign([payer]); // Send the signed transaction to the network -const transactionSignature = await connection.sendTransaction(transaction); +const signature = await connection.sendTransaction(transaction); ``` ### Address Lookup Table -Address Lookup Tables (also called lookup tables or LUTs) are accounts that -store a lookup table of other account addresses. These LUT accounts are owned by -the Address Lookup Table Program and are used to increase the number of accounts -that can be included in a single transaction. +Address Lookup Tables (LUTs) are accounts that store references to other account +addresses. These LUT accounts, owned by the Address Lookup Table Program, +increase the number of accounts that can be included in a transaction. -Versioned transactions can include the address of an LUT account and then -reference additional accounts with a 1-byte index instead of including the full -address of those accounts. This significantly reduces the amount of space used -for referencing accounts in a transaction. +In versioned transactions, LUT addresses are included, and additional accounts +are referenced with a 1-byte index instead of the full 32-byte address, reducing +space used by the transaction. -To simplify the process of working with LUTs, the `@solana/web3.js` library -includes an `AddressLookupTableProgram` class which provides a set of methods to -create instructions for managing LUTs. 
These methods include: +The `@solana/web3.js` library offers an `AddressLookupTableProgram` class, +providing methods to manage LUTs: -- `createLookupTable` - creates a new LUT account -- `freezeLookupTable` - makes an existing LUT immutable -- `extendLookupTable` - adds addresses to an existing LUT -- `deactivateLookupTable` - puts an LUT in a “deactivation” period before it can - be closed -- `closeLookupTable` - permanently closes an LUT account +- `createLookupTable` - creates a new LUT account. +- `freezeLookupTable` - makes a LUT immutable. +- `extendLookupTable` - adds addresses to an existing LUT. +- `deactivateLookupTable` - begins the deactivation period for an LUT. +- `closeLookupTable` - permanently closes an LUT account. #### Create a lookup table -You use the `createLookupTable` method to construct the instruction that creates -a lookup table. The function requires the following parameters: +You can use the `createLookupTable` method to construct the instruction for +creating a lookup table. This requires the following parameters: -- `authority` - the account that will have permission to modify the lookup table -- `payer` - the account that will pay for the account creation -- `recentSlot` - a recent slot to derive the lookup table's address +- `authority` - the account authorized to modify the lookup table. +- `payer` - the account responsible for paying the account creation fees. +- `recentSlot` - a recent slot used to derive the lookup table's address. -The function returns both the instruction to create the lookup table and the -address of the lookup table. +The function returns both the instruction for creating the LUT and its address. 
```typescript // Get the current slot const slot = await connection.getSlot(); -// Create an instruction for creating a lookup table -// and retrieve the address of the new lookup table +// Create the lookup table creation instruction and retrieve its address const [lookupTableInst, lookupTableAddress] = - web3.AddressLookupTableProgram.createLookupTable({ - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - recentSlot: slot - 1, // The recent slot to derive lookup table's address + AddressLookupTableProgram.createLookupTable({ + authority: user.publicKey, // Account authorized to modify the LUT + payer: user.publicKey, // Account paying for transaction fees + recentSlot: slot - 1, // Use a recent slot to derive the LUT address }); ``` -Under the hood, the lookup table address is simply a PDA derived using the -`authority` and `recentSlot` as seeds. +Under the hood, the lookup table address is a Program Derived Address (PDA) +generated using the `authority` and `recentSlot` as seeds. ```typescript const [lookupTableAddress, bumpSeed] = PublicKey.findProgramAddressSync( @@ -174,10 +160,12 @@ const [lookupTableAddress, bumpSeed] = PublicKey.findProgramAddressSync( ); ``` -Note that using the most recent slot sometimes results in an error after sending -the transaction. To avoid this, you can use a slot that is one slot prior the -most recent one (e.g. `recentSlot: slot - 1`). However, if you still encounter -an error when sending the transaction, you can try resending the transaction. + +Using the most recent slot sometimes results in errors when submitting the +transaction. To avoid this, it’s recommended to use a slot that is one slot +before the most recent one (`recentSlot: currentSlot - 1`). If you still +encounter errors when sending the transaction, try resubmitting it. 
+ ``` "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", @@ -187,57 +175,57 @@ an error when sending the transaction, you can try resending the transaction. #### Extend a lookup table -You use the `extendLookupTable` method to create an instruction that adds -addresses to an existing lookup table. It takes the following parameters: +The `extendLookupTable` method creates an instruction to add addresses to an +existing lookup table. It requires the following parameters: -- `payer` - the account that will pay for the transaction fees and any increased - rent -- `authority` - the account that has permission to change the lookup table -- `lookupTable` - the address of the lookup table to extend -- `addresses` - the addresses to add to the lookup table +- `payer` - the account responsible for paying transaction fees and any + additional rent. +- `authority` - the account authorized to modify the lookup table. +- `lookupTable` - the address of the lookup table to be extended. +- `addresses` - the list of addresses to add to the lookup table. The function returns an instruction to extend the lookup table. 
```typescript const addresses = [ - new web3.PublicKey("31Jy3nFeb5hKVdB4GS4Y7MhU7zhNMFxwF7RGVhPc1TzR"), - new web3.PublicKey("HKSeapcvwJ7ri6mf3HwBtspLFTDKqaJrMsozdfXfg5y2"), - // add more addresses + new PublicKey("31Jy3nFeb5hKVdB4GS4Y7MhU7zhNMFxwF7RGVhPc1TzR"), + new PublicKey("HKSeapcvwJ7ri6mf3HwBtspLFTDKqaJrMsozdfXfg5y2"), + // Add more addresses here ]; -// Create an instruction to extend a lookup table with the provided addresses -const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({ - payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - lookupTable: lookupTableAddress, // The address of the lookup table to extend - addresses: addresses, // The addresses to add to the lookup table +// Create the instruction to extend the lookup table with the provided addresses +const extendInstruction = AddressLookupTableProgram.extendLookupTable({ + payer: user.publicKey, // Account paying for transaction fees + authority: user.publicKey, // Account authorized to modify the lookup table + lookupTable: lookupTableAddress, // Address of the lookup table to extend + addresses: addresses, // Addresses to add to the lookup table }); ``` Note that when extending a lookup table, the number of addresses that can be -added in one instruction is limited by the transaction size limit, which is 1232 -bytes. This means you can add 30 addresses to a lookup table at a time. If you -need to add more than that, you'll need to send multiple transactions. Each -lookup table can store a maximum of 256 addresses. +added in a single instruction is limited by the transaction size limit of 1232 +bytes. You can add approximately 30 addresses in one transaction. If you need to +add more than that, multiple transactions are required. Each lookup table can +store up to 256 addresses. 
#### Send Transaction -After creating the instructions, you can add them to a transaction and sent it -to the network. +After creating the instructions, you can add them to a transaction and send it +to the network: ```typescript // Get the latest blockhash -let { blockhash } = await connection.getLatestBlockhash(); +const { blockhash } = await connection.getLatestBlockhash(); // Create the transaction message -const message = new web3.TransactionMessage({ - payerKey: payer.publicKey, // Public key of the account that will pay for the transaction +const message = new TransactionMessage({ + payerKey: payer.publicKey, // Account paying for the transaction recentBlockhash: blockhash, // Latest blockhash - instructions: [lookupTableInst, extendInstruction], // Instructions included in transaction + instructions: [lookupTableInst, extendInstruction], // Instructions to be included in the transaction }).compileToV0Message(); -// Create the versioned transaction using the message -const transaction = new web3.VersionedTransaction(message); +// Create the versioned transaction from the message +const transaction = new VersionedTransaction(message); // Sign the transaction transaction.sign([payer]); @@ -246,65 +234,62 @@ transaction.sign([payer]); const transactionSignature = await connection.sendTransaction(transaction); ``` -Note that when you first create or extend a lookup table, it needs to "warm up" -for one slot before the LUT or new addresses can be used in transactions. In -other words, you can only use lookup tables and access addresses that were added -prior to the current slot. +Note that after you create or extend a lookup table, it must "warm up" for one +slot before the lookup table or newly added addresses can be used in +transactions. You can only access lookup tables and addresses added in slots +prior to the current one. 
+ +If you encounter the following error, it may indicate that you're trying to +access a lookup table or an address before the warm-up period has completed: ```typescript SendTransactionError: failed to send transaction: invalid transaction: Transaction address table lookup uses an invalid index ``` -If you encounter the error above or are unable to access addresses in a lookup -table immediately after extending it, it's likely because you're attempting to -access the lookup table or a specific address prior to the end of the warm up -period. To avoid this issue, add a delay after extending the lookup table before -sending a transaction that references the table. +To avoid this issue, ensure you add a delay after extending the lookup table +before attempting to reference the table in a transaction. #### Deactivate a lookup table -When a lookup table is no longer needed, you can deactivate and close it to -reclaim its rent balance. Address lookup tables can be deactivated at any time, -but they can continue to be used by transactions until a specified -"deactivation" slot is no longer "recent". This "cool-down" period ensures that -in-flight transactions can't be censored by LUTs being closed and recreated in -the same slot. The deactivation period is approximately 513 slots. +When a lookup table (LUT) is no longer needed, you can deactivate it to reclaim +its rent balance. Deactivating a LUT puts it into a "cool-down" period +(approximately 513 slots) during which it can still be used by transactions. +This prevents transactions from being censored by deactivating and recreating +LUTs within the same slot. 
-To deactivate an LUT, use the `deactivateLookupTable` method and pass in the -following parameters: +To deactivate a LUT, use the `deactivateLookupTable` method with the following +parameters: -- `lookupTable` - the address of the LUT to be deactivated -- `authority` - the account with permission to deactivate the LUT +- `lookupTable` - the address of the lookup table to be deactivated. +- `authority` - the account with the authority to deactivate the LUT. ```typescript -const deactivateInstruction = - web3.AddressLookupTableProgram.deactivateLookupTable({ - lookupTable: lookupTableAddress, // The address of the lookup table to deactivate - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - }); +const deactivateInstruction = AddressLookupTableProgram.deactivateLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to deactivate + authority: user.publicKey, // Authority to modify the lookup table +}); ``` #### Close a lookup table -To close a lookup table after its deactivation period, use the -`closeLookupTable` method. This method creates an instruction to close a -deactivated lookup table and reclaim its rent balance. It takes the following -parameters: +Once a LUT has been deactivated and the cool-down period has passed, you can +close the lookup table to reclaim its rent balance. Use the `closeLookupTable` +method, which requires the following parameters: -- `lookupTable` - the address of the LUT to be closed -- `authority` - the account with permission to close the LUT -- `recipient` - the account that will receive the reclaimed rent balance +- `lookupTable` - the address of the LUT to be closed. +- `authority` - the account with the authority to close the LUT. +- `recipient` - the account that will receive the reclaimed rent balance. 
```typescript -const closeInstruction = web3.AddressLookupTableProgram.closeLookupTable({ - lookupTable: lookupTableAddress, // The address of the lookup table to close - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - recipient: user.publicKey, // The recipient of closed account lamports +const closeInstruction = AddressLookupTableProgram.closeLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to close + authority: user.publicKey, // Authority to close the LUT + recipient: user.publicKey, // Recipient of the reclaimed rent balance }); ``` -Attempting to close a lookup table before it's been fully deactivated will -result in an error. +Attempting to close a LUT before it has been fully deactivated will result in +the following error: ``` "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", @@ -317,21 +302,21 @@ result in an error. In addition to standard CRUD operations, you can "freeze" a lookup table. This makes it immutable so that it can no longer be extended, deactivated, or closed. -You freeze a lookup table with the `freezeLookupTable` method. It takes the +The `freezeLookupTable` method is used for this operation and takes the following parameters: -- `lookupTable` - the address of the LUT to be frozen -- `authority` - the account with permission to freeze the LUT +- `lookupTable` - the address of the LUT to freeze. +- `authority` - the account with the authority to freeze the LUT. 
```typescript -const freezeInstruction = web3.AddressLookupTableProgram.freezeLookupTable({ - lookupTable: lookupTableAddress, // The address of the lookup table to freeze - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) +const freezeInstruction = AddressLookupTableProgram.freezeLookupTable({ + lookupTable: lookupTableAddress, // Address of the lookup table to freeze + authority: user.publicKey, // Authority to freeze the LUT }); ``` -Once an LUT is frozen, any further attempts to modify it will result in an -error. +Once a LUT is frozen, any attempt to modify it will result in an error like the +following: ``` "Program AddressLookupTab1e1111111111111111111111111 invoke [1]", @@ -341,28 +326,29 @@ error. #### Using lookup tables in versioned transactions -To use a lookup table in a versioned transaction, you need to retrieve the -lookup table account using its address. +To utilize a lookup table in a versioned transaction, first retrieve the lookup +table account using its address: ```typescript +// Fetch the lookup table account from the blockchain using its address const lookupTableAccount = ( - await connection.getAddressLookupTable(lookupTableAddress) + await connection.getAddressLookupTable(new PublicKey(lookupTableAddress)) ).value; ``` -You can then create a list of instructions to include in a transaction as usual. -When creating the `TransactionMessage`, you can include any lookup table -accounts by passing them as an array to the `compileToV0Message()` method. You -can also provide multiple lookup table accounts. +Once you have the lookup table account, you can create the list of instructions +for the transaction. When constructing the `TransactionMessage`, pass the lookup +table accounts as an array to the `compileToV0Message()` method. You can include +multiple lookup table accounts if needed. 
```typescript const message = new web3.TransactionMessage({ - payerKey: payer.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - recentBlockhash: blockhash, // The blockhash of the most recent block - instructions: instructions, // The instructions to include in the transaction + payerKey: payer.publicKey, // Public key of the account paying for the transaction + recentBlockhash: blockhash, // Blockhash of the most recent block + instructions: instructions, // Instructions to be included in the transaction }).compileToV0Message([lookupTableAccount]); // Include lookup table accounts -// Create the versioned transaction using the message +// Create a versioned transaction using the compiled message const transaction = new web3.VersionedTransaction(message); // Sign the transaction @@ -376,261 +362,324 @@ const transactionSignature = await connection.sendTransaction(transaction); Let's go ahead and practice using lookup tables! -this lab will guide you through the steps of creating, extending, and then using -a lookup table in a versioned transaction. - -#### 1. Get the starter code - -To begin, download the starter code from the starter branch of this -[repository](https://github.com/Unboxed-Software/solana-versioned-transactions/tree/starter). -Once you have the starter code, run `npm install` in the terminal to install the -required dependencies. - -The starter code includes an example of creating a legacy transaction that -intends to atomically transfer SOL to 22 recipients. The transaction contains 22 -instructions where each instruction transfers SOL from the signer to a different -recipient. - -The purpose of the starter code is to illustrate the limitation on the number of -addresses that can be included in a legacy transaction. The transaction built in -the starter code is expected to fail when sent. - -The following starter code can be found in the `index.ts` file. 
- -```typescript -import { initializeKeypair } from "./initializeKeypair"; -import * as web3 from "@solana/web3.js"; +This lab will guide you through creating, extending, and using a lookup table in +a versioned transaction. + +#### 1. Create the `try-large-transaction.ts` file + +To begin, create a new file named `try-large-transaction.ts` in your project +directory. This file will contain the code to illustrate a scenario where a +legacy transaction is created to transfer SOL to 22 recipients in a single +atomic transaction. The transaction will include 22 separate instructions, each +transferring SOL from the payer (signer) to a different recipient. + +This example highlights a key limitation of legacy transactions when trying to +accommodate many account addresses within a single transaction. As expected, +when attempting to send this transaction, it will likely fail due to exceeding +the transaction size limits. + +Here’s the code to include in `try-large-transaction.ts`: + +```typescript filename="try-large-transaction.ts" +import { + Connection, + clusterApiUrl, + Keypair, + Transaction, + SystemProgram, + LAMPORTS_PER_SOL, + sendAndConfirmTransaction, +} from "@solana/web3.js"; +import { + initializeKeypair, + makeKeypairs, + getExplorerLink, +} from "@solana-developers/helpers"; +import dotenv from "dotenv"; +dotenv.config(); async function main() { - // Connect to the devnet cluster - const connection = new web3.Connection(web3.clusterApiUrl("devnet")); + // Connect to the local Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - // Initialize the user's keypair - const user = await initializeKeypair(connection); - console.log("PublicKey:", user.publicKey.toBase58()); + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); - // Generate 22 addresses - const recipients = []; - for (let i = 0; i < 22; i++) { - 
recipients.push(web3.Keypair.generate().publicKey); - } + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); - // Create an array of transfer instructions - const transferInstructions = []; + // Create a legacy transaction + const transaction = new Transaction(); - // Add a transfer instruction for each address - for (const address of recipients) { - transferInstructions.push( - web3.SystemProgram.transfer({ - fromPubkey: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - toPubkey: address, // The destination account for the transfer - lamports: web3.LAMPORTS_PER_SOL * 0.01, // The amount of lamports to transfer + // Add 22 transfer instructions to the transaction + recipients.forEach(recipient => { + transaction.add( + SystemProgram.transfer({ + fromPubkey: payer.publicKey, + toPubkey: recipient, + lamports: LAMPORTS_PER_SOL * 0.01, // Transfer 0.01 SOL to each recipient }), ); - } - - // Create a transaction and add the transfer instructions - const transaction = new web3.Transaction().add(...transferInstructions); - - // Send the transaction to the cluster (this will fail in this example if addresses > 21) - const txid = await connection.sendTransaction(transaction, [user]); - - // Get the latest blockhash and last valid block height - const { lastValidBlockHeight, blockhash } = - await connection.getLatestBlockhash(); - - // Confirm the transaction - await connection.confirmTransaction({ - blockhash: blockhash, - lastValidBlockHeight: lastValidBlockHeight, - signature: txid, }); - // Log the transaction URL on the Solana Explorer - console.log(`https://explorer.solana.com/tx/${txid}?cluster=devnet`); + // Sign and send the transaction + try { + const signature = await sendAndConfirmTransaction(connection, transaction, [ + payer, + ]); + console.log( + `Transaction successful with signature: ${getExplorerLink("tx", signature, "devnet")}`, + ); + } 
catch (error) { + console.error("Transaction failed:", error); + } } ``` -To execute the code, run `npm start`. This will create a new keypair, write it -to the `.env` file, airdrop devnet SOL to the keypair, and send the transaction -built in the starter code. The transaction is expected to fail with the error -message `Transaction too large`. +To run the example, execute `npx esrun try-large-transaction.ts`. This process +will: + +- Generate a new keypair. +- Store the keypair details in the `.env` file. +- Request airdrop of devnet SOL to the generated keypair. +- Attempt to send the transaction. +- Since the transaction includes 22 instructions, it is expected to fail with + the error: "Transaction too large". ``` Creating .env file Current balance is 0 Airdropping 1 SOL... New balance is 1 -PublicKey: 5ZZzcDbabFHmoZU8vm3VzRzN5sSQhkf91VJzHAJGNM7B +PublicKey: 7YsGYC4EBs6Dxespe4ZM3wfCp856xULWoLw7QUcVb6VG Error: Transaction too large: 1244 > 1232 ``` -In the next steps, we'll go over how to use lookup tables with versioned -transactions to increase the number of addresses that can be included in a -single transaction. - -Before we start, go ahead and delete the content of the `main` function to leave -only the following: +#### 2. Create the `use-lookup-tables.ts` File + +Next, we'll explore how to use lookup tables in combination with versioned +transactions to overcome the limitation of legacy transactions and include a +greater number of addresses in a single transaction. + +Create a new file named `use-lookup-tables.ts` in your project directory. This +file will contain the code to demonstrate the use of lookup tables. 
+ +Here’s the starter code to include in `use-lookup-tables.ts` file: + +```typescript filename="use-lookup-tables.ts" +import { + Connection, + clusterApiUrl, + Keypair, + TransactionInstruction, + AddressLookupTableAccount, + SystemProgram, + VersionedTransaction, + TransactionMessage, + AddressLookupTableProgram, + LAMPORTS_PER_SOL, + getSlot, +} from "@solana/web3.js"; +import { + initializeKeypair, + makeKeypairs, + getExplorerLink, +} from "@solana-developers/helpers"; +import dotenv from "dotenv"; +dotenv.config(); -```typescript async function main() { - // Connect to the devnet cluster - const connection = new web3.Connection(web3.clusterApiUrl("devnet")); + // Connect to the local Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - // Initialize the user's keypair - const user = await initializeKeypair(connection); - console.log("PublicKey:", user.publicKey.toBase58()); + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); - // Generate 22 addresses - const addresses = []; - for (let i = 0; i < 22; i++) { - addresses.push(web3.Keypair.generate().publicKey); - } + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); } ``` -#### 2. Create a `sendV0Transaction` helper function +Next, we will create a few helper functions that will be crucial for working +with versioned transactions and lookup tables. These functions will simplify our +process and make our code more modular and reusable. -We'll be sending multiple "version 0" transactions, so let's create a helper -function to facilitate this. +#### 3. Create a `sendV0Transaction` helper function -This function should take parameters for a connection, a user's keypair, an -array of transaction instructions, and an optional array of lookup table -accounts. 
+To handle versioned transactions, we will create a helper function in +`use-lookup-tables.ts` file, called `sendV0Transaction`, to simplify the +process. This function will accept the following parameters: -The function then performs the following tasks: +- `connection`: the solana connection to the cluster (e.g., devnet). +- `user`: the keypair of the user (payer) signing the transaction. +- `instructions`: an array of TransactionInstruction objects to include in the + transaction. +- `lookupTableAccounts` (optional): an array of lookup table accounts, if + applicable, to reference additional addresses. -- Retrieves the latest blockhash and last valid block height from the Solana - network -- Creates a new transaction message using the provided instructions -- Signs the transaction using the user's keypair -- Sends the transaction to the Solana network -- Confirms the transaction -- Logs the transaction URL on the Solana Explorer +This helper function will: -```typescript +- Retrieve the latest blockhash and last valid block height from the Solana + network. +- Compile a versioned transaction message using the provided instructions. +- Sign the transaction using the user's keypair. +- Send the transaction to the network. +- Confirm the transaction and log the transaction's URL using Solana Explorer. 
+ +```typescript filename="use-lookup-tables.ts" async function sendV0Transaction( - connection: web3.Connection, - user: web3.Keypair, - instructions: web3.TransactionInstruction[], - lookupTableAccounts?: web3.AddressLookupTableAccount[], + connection: Connection, + user: Keypair, + instructions: TransactionInstruction[], + lookupTableAccounts?: AddressLookupTableAccount[], ) { // Get the latest blockhash and last valid block height - const { lastValidBlockHeight, blockhash } = + const { blockhash, lastValidBlockHeight } = await connection.getLatestBlockhash(); // Create a new transaction message with the provided instructions - const messageV0 = new web3.TransactionMessage({ + const messageV0 = new TransactionMessage({ payerKey: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) recentBlockhash: blockhash, // The blockhash of the most recent block instructions, // The instructions to include in the transaction - }).compileToV0Message(lookupTableAccounts ? 
lookupTableAccounts : undefined); - - // Create a new transaction object with the message - const transaction = new web3.VersionedTransaction(messageV0); + }).compileToV0Message(lookupTableAccounts); - // Sign the transaction with the user's keypair - transaction.sign([user]); + // Create a versioned transaction from the message + const transaction = new VersionedTransaction(messageV0); - // Send the transaction to the cluster - const txid = await connection.sendTransaction(transaction); - - // Confirm the transaction - await connection.confirmTransaction( + // Use the helper function to send and confirm the transaction + const txid = await sendAndConfirmTransactionV0( + connection, + transaction, + [user], { - blockhash: blockhash, - lastValidBlockHeight: lastValidBlockHeight, - signature: txid, + commitment: "finalized", // Ensures the transaction is confirmed at the highest level }, - "finalized", ); - // Log the transaction URL on the Solana Explorer - console.log(`https://explorer.solana.com/tx/${txid}?cluster=devnet`); + // Log the transaction URL on the Solana Explorer using the helper + const explorerLink = getExplorerLink("tx", txid, "devnet"); + console.log( + `Transaction successful! View it on Solana Explorer: ${explorerLink}`, + ); } ``` -#### 3. Create a `waitForNewBlock` helper function +#### 4. Create a `waitForNewBlock` helper function -Recall that lookup tables and the addresses contained in them can't be -referenced immediately after creation or extension. This means we'll end up -needing to wait for a new block before submitting transactions that reference -the newly created or extended lookup table. To make this simpler down the road, -let's create a `waitForNewBlock` helper function that we'll use to wait for -lookup tables to activate between sending transactions. +When working with lookup tables, it's important to remember that newly created +or extended lookup tables cannot be referenced immediately. 
Therefore, before
+submitting transactions that reference these tables, we need to wait for a new
+block to be generated.
 
-This function will have parameters for a connection and a target block height.
-It then starts an interval that checks the current block height of the network
-every 1000ms. Once the new block height exceeds the target height, the interval
-is cleared and the promise is resolved.
+We will create a `waitForNewBlock` helper function that accepts:
 
-```typescript
-function waitForNewBlock(connection: web3.Connection, targetHeight: number) {
-  console.log(`Waiting for ${targetHeight} new blocks`);
-  return new Promise(async (resolve: any) => {
-    // Get the last valid block height of the blockchain
-    const { lastValidBlockHeight } = await connection.getLatestBlockhash();
+- `connection`: the Solana network connection.
+- `targetHeight`: the number of new blocks to wait for.
+
+This function will:
+
+- Start an interval that checks the current block height of the network every
+  second (1000ms).
+- Resolve the promise once the current block height exceeds the target block
+  height.
- // Set an interval to check for new blocks every 1000ms +```typescript filename="use-lookup-tables.ts" +async function waitForNewBlock( + connection: Connection, + targetHeight: number, +): Promise { + console.log(`Waiting for ${targetHeight} new blocks...`); + + // Get the initial block height of the blockchain + const { lastValidBlockHeight: initialBlockHeight } = + await connection.getLatestBlockhash(); + + return new Promise(resolve => { + const SECOND = 1000; + const checkInterval = 1 * SECOND; // Interval to check for new blocks (1000ms) + + // Set an interval to check for new block heights const intervalId = setInterval(async () => { - // Get the new valid block height - const { lastValidBlockHeight: newValidBlockHeight } = - await connection.getLatestBlockhash(); - // console.log(newValidBlockHeight) - - // Check if the new valid block height is greater than the target block height - if (newValidBlockHeight > lastValidBlockHeight + targetHeight) { - // If the target block height is reached, clear the interval and resolve the promise + try { + // Get the current block height + const { lastValidBlockHeight: currentBlockHeight } = + await connection.getLatestBlockhash(); + + // If the current block height exceeds the target, resolve and clear interval + if (currentBlockHeight >= initialBlockHeight + targetHeight) { + clearInterval(intervalId); + console.log(`New block height reached: ${currentBlockHeight}`); + resolve(); + } + } catch (error) { + console.error("Error fetching block height:", error); clearInterval(intervalId); - resolve(); + resolve(); // Resolve to avoid hanging in case of errors } - }, 1000); + }, checkInterval); }); } ``` -#### 4. Create an `initializeLookupTable` function +#### 5. Create an `initializeLookupTable` function -Now that we have some helper functions ready to go, declare a function named -`initializeLookupTable`. This function has parameters `user`, `connection`, and -`addresses`. 
The function will: +Next, we need to initialize a lookup table to hold the addresses of the +recipients. The `initializeLookupTable` function will accept the following +parameters: -1. Retrieve the current slot -2. Generate an instruction for creating a lookup table -3. Generate an instruction for extending the lookup table with the provided - addresses -4. Send and confirm a transaction with the instructions for creating and - extending the lookup table -5. Return the address of the lookup table +- `user`: the user's keypair (payer and authority). +- `connection`: the Solana network connection. +- `addresses`: an array of recipient addresses (public keys) to add to the + lookup table. -```typescript +The function will: + +- Retrieve the current slot to derive the lookup table's address. +- Generate the necessary instructions to create and extend the lookup table with + the provided recipient addresses. +- Send and confirm a transaction that includes these instructions. +- Return the address of the newly created lookup table. + +Although the transaction includes the full recipient addresses, using the lookup +table allows Solana to reference those addresses with significantly fewer bytes +in the actual transaction. By including the lookup table in the versioned +transaction, the framework optimizes the transaction size, replacing addresses +with pointers to the lookup table. + +This design is crucial for enabling the transaction to support more recipients +by staying within Solana’s transaction size limits. 
+
+```typescript filename="use-lookup-tables.ts"
 async function initializeLookupTable(
-  user: web3.Keypair,
-  connection: web3.Connection,
-  addresses: web3.PublicKey[],
-): Promise<web3.PublicKey> {
-  // Get the current slot
-  const slot = await connection.getSlot();
+  user: Keypair,
+  connection: Connection,
+  addresses: PublicKey[],
+): Promise<PublicKey> {
+  // Get the current slot
+  const slot = await connection.getSlot();
 
   // Create an instruction for creating a lookup table
   // and retrieve the address of the new lookup table
   const [lookupTableInst, lookupTableAddress] =
-    web3.AddressLookupTableProgram.createLookupTable({
-      authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table)
-      payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees)
-      recentSlot: slot - 1, // The recent slot to derive lookup table's address
+    AddressLookupTableProgram.createLookupTable({
+      authority: user.publicKey, // The authority to modify the lookup table
+      payer: user.publicKey, // The payer for transaction fees
+      recentSlot: slot - 1, // The slot for lookup table address derivation
     });
-  console.log("lookup table address:", lookupTableAddress.toBase58());
+
+  console.log("Lookup Table Address:", lookupTableAddress.toBase58());
 
   // Create an instruction to extend a lookup table with the provided addresses
-  const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({
-    payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees)
-    authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table)
-    lookupTable: lookupTableAddress, // The address of the lookup table to extend
-    addresses: addresses.slice(0, 30), // The addresses to add to the lookup table
+  const extendInstruction = AddressLookupTableProgram.extendLookupTable({
+    payer: user.publicKey, // The payer of transaction fees
+    
authority: user.publicKey, // The authority to extend the lookup table
+    lookupTable: lookupTableAddress, // Address of the lookup table to extend
+    addresses: addresses.slice(0, 30), // Add up to 30 addresses per instruction
   });
 
-  await sendV0Transaction(connection, user, [
+  // Use the helper function to send a versioned transaction
+  await sendV0Transaction(connection, user, [
     lookupTableInst,
     extendInstruction,
   ]);
@@ -639,73 +688,84 @@ async function initializeLookupTable(
 }
 ```
 
-#### 5. Modify `main` to use lookup tables
+#### 6. Modify `main` to use lookup tables
 
-Now that we can initialize a lookup table with all of the recipients' addresses,
-let's update `main` to use versioned transactions and lookup tables. We'll need
-to:
+With the helper functions in place, we are now ready to modify the `main`
+function to utilize versioned transactions and address lookup tables. To do so,
+we will follow these steps:
 
-1. Call `initializeLookupTable`
-2. Call `waitForNewBlock`
-3. Get the lookup table using `connection.getAddressLookupTable`
-4. Create the transfer instruction for each recipient
-5. Send the v0 transaction with all of the transfer instructions
+1. Call `initializeLookupTable`: Create and extend the lookup table with the
+   recipients' addresses.
+2. Call `waitForNewBlock`: Ensure the lookup table is activated by waiting for a
+   new block.
+3. Retrieve the Lookup Table: Use `connection.getAddressLookupTable` to fetch
+   the lookup table and reference it in the transaction.
+4. Create Transfer Instructions: Generate a transfer instruction for each
+   recipient.
+5. Send the Versioned Transaction: Use `sendV0Transaction` to send a single
   transaction with all transfer instructions, referencing the lookup table. 
-```typescript +```typescript filename="use-lookup-tables.ts" async function main() { - // Connect to the devnet cluster - const connection = new web3.Connection(web3.clusterApiUrl("devnet")); + // Connect to the devnet Solana cluster + const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - // Initialize the user's keypair - const user = await initializeKeypair(connection); - console.log("PublicKey:", user.publicKey.toBase58()); - - // Generate 22 addresses - const recipients = []; - for (let i = 0; i < 22; i++) { - recipients.push(web3.Keypair.generate().publicKey); - } + // Initialize the keypair from the environment variable or create a new one + const payer = await initializeKeypair(connection); + // Generate 22 recipient keypairs using makeKeypairs + const recipients = makeKeypairs(22).map(keypair => keypair.publicKey); + // Initialize the lookup table with the generated recipients const lookupTableAddress = await initializeLookupTable( user, connection, recipients, ); + // Wait for a new block before using the lookup table await waitForNewBlock(connection, 1); + // Fetch the lookup table account const lookupTableAccount = ( await connection.getAddressLookupTable(lookupTableAddress) ).value; + // Check if the lookup table was successfully fetched if (!lookupTableAccount) { throw new Error("Lookup table not found"); } - const transferInstructions = recipients.map(recipient => { - return web3.SystemProgram.transfer({ - fromPubkey: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - toPubkey: recipient, // The destination account for the transfer - lamports: web3.LAMPORTS_PER_SOL * 0.01, // The amount of lamports to transfer - }); - }); + // Create transfer instructions for each recipient + const transferInstructions = recipients.map(recipient => + SystemProgram.transfer({ + fromPubkey: user.publicKey, // The payer + toPubkey: recipient, // The recipient + lamports: LAMPORTS_PER_SOL * 0.01, // Amount to 
transfer + }), + ); - await sendV0Transaction(connection, user, transferInstructions, [ - lookupTableAccount, - ]); + // Send the versioned transaction including the lookup table + const txid = await sendVersionedTransaction( + connection, + user, + transferInstructions, + [lookupTableAccount], + ); + + // Log the transaction link for easy access + console.log(`Transaction URL: ${getExplorerLink("tx", txid, "devnet")}`); } ``` -Notice that you create the transfer instructions with the full recipient address -even though we created a lookup table. That's because by including the lookup -table in the versioned transaction, you tell the `web3.js` framework to replace -any recipient addresses that match addresses in the lookup table with pointers -to the lookup table instead. By the time the transaction is sent to the network, -addresses that exist in the lookup table will be referenced by a single byte -rather than the full 32 bytes. +Even though we will create transfer instructions with full recipient addresses, +the use of lookup tables allows the `@solana/web3.js` framework to optimize the +transaction size. The addresses in the transaction that match entries in the +lookup table will be replaced with compact pointers referencing the lookup +table. By doing this, addresses will be represented using only a single byte in +the final transaction, significantly reducing the transaction's size. -Use `npm start` in the command line to execute the `main` function. You should -see an output similar to the following: +Use `npx esrun use-lookup-tables.ts` in the command line to execute the `main` +function. You should see an output similar to the following: ```bash Current balance is 1.38866636 @@ -726,7 +786,7 @@ Remember, this same transaction was failing when you first downloaded the starter code. Now that we're using lookup tables, we can do all 22 transfers in a single transaction. -#### 6. Add more address to the lookup table +#### 6. 
Add more addresses to the lookup table Keep in mind that the solution we've come up with so far only supports transfers to up to 30 accounts since we only extend the lookup table once. When you factor @@ -738,55 +798,54 @@ All we need to do is go into `initializeLookupTable` and do two things: 1. Modify the existing call to `extendLookupTable` to only add the first 30 addresses (any more than that and the transaction will be too large) -2. Add a loop that will keep extending a lookup table 30 addresses at a time +2. Add a loop that will keep extending a lookup table of 30 addresses at a time until all addresses have been added -```typescript +```typescript filename="use-lookup-tables.ts" async function initializeLookupTable( - user: web3.Keypair, - connection: web3.Connection, - addresses: web3.PublicKey[], -): Promise { + user: Keypair, + connection: Connection, + addresses: PublicKey[], +): Promise { // Get the current slot const slot = await connection.getSlot(); - // Create an instruction for creating a lookup table - // and retrieve the address of the new lookup table + // Create the lookup table and retrieve its address const [lookupTableInst, lookupTableAddress] = - web3.AddressLookupTableProgram.createLookupTable({ - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - recentSlot: slot - 1, // The recent slot to derive lookup table's address + AddressLookupTableProgram.createLookupTable({ + authority: user.publicKey, // The authority to modify the lookup table + payer: user.publicKey, // The payer for the transaction fees + recentSlot: slot - 1, // Recent slot to derive lookup table's address }); - console.log("lookup table address:", lookupTableAddress.toBase58()); + console.log("Lookup table address:", lookupTableAddress.toBase58()); - // Create an instruction to extend a lookup table with the provided 
addresses - const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({ - payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - lookupTable: lookupTableAddress, // The address of the lookup table to extend - addresses: addresses.slice(0, 30), // The addresses to add to the lookup table - }); + // Helper function to extend the lookup table in batches + const extendLookupTable = async (remainingAddresses: PublicKey[]) => { + while (remainingAddresses.length > 0) { + const toAdd = remainingAddresses.slice(0, 30); // Add up to 30 addresses + remainingAddresses = remainingAddresses.slice(30); - await sendV0Transaction(connection, user, [ - lookupTableInst, - extendInstruction, - ]); + const extendInstruction = AddressLookupTableProgram.extendLookupTable({ + payer: user.publicKey, + authority: user.publicKey, + lookupTable: lookupTableAddress, + addresses: toAdd, + }); - var remaining = addresses.slice(30); + // Send the transaction to extend the lookup table with the new addresses + await sendVersionedTransaction(connection, user, [extendInstruction]); + } + }; - while (remaining.length > 0) { - const toAdd = remaining.slice(0, 30); - remaining = remaining.slice(30); - const extendInstruction = web3.AddressLookupTableProgram.extendLookupTable({ - payer: user.publicKey, // The payer (i.e., the account that will pay for the transaction fees) - authority: user.publicKey, // The authority (i.e., the account with permission to modify the lookup table) - lookupTable: lookupTableAddress, // The address of the lookup table to extend - addresses: toAdd, // The addresses to add to the lookup table - }); + // Send the initial transaction to create the lookup table and add the first 30 addresses + const initialBatch = addresses.slice(0, 30); + const remainingAddresses = addresses.slice(30); - await 
sendV0Transaction(connection, user, [extendInstruction]); - } + await sendVersionedTransaction(connection, user, [lookupTableInst]); + + // Extend the lookup table with the remaining addresses, if any + await extendLookupTable(initialBatch); + await extendLookupTable(remainingAddresses); return lookupTableAddress; } @@ -799,7 +858,7 @@ look at the final solution code you can ## Challenge -As a challenge, experiment with deactivating, closing and freezing lookup +As a challenge, experiment with deactivating, closing, and freezing lookup tables. Remember that you need to wait for a lookup table to finish deactivating before you can close it. Also, if a lookup table is frozen, it cannot be modified (deactivated or closed), so you will have to test separately or use diff --git a/content/courses/program-optimization/program-architecture.md b/content/courses/program-optimization/program-architecture.md index d1d05e1f6..e8682c8e5 100644 --- a/content/courses/program-optimization/program-architecture.md +++ b/content/courses/program-optimization/program-architecture.md @@ -13,7 +13,7 @@ description: "Design your Solana programs efficiently." - If your data accounts are too large for the Stack, wrap them in `Box` to allocate them to the Heap - Use Zero-Copy to deal with accounts that are too large for `Box` (< 10MB) -- The size and the order of fields in an account matter; put variable length +- The size and the order of fields in an account matter; put the variable length fields at the end - Solana can process in parallel, but you can still run into bottlenecks; be mindful of "shared" accounts that all users interacting with the program have @@ -31,28 +31,29 @@ with the code. And you, as the designer, need to think about: These questions are even more important when developing for a blockchain. Not only are resources more limited than in a typical computing environment, you're -also dealing with people's assets; code has a cost now. +also dealing with people's assets. 
We'll leave most of the asset handling discussion to -[security course lesson](/content/courses/program-security/security-intro), but -it's important to note the nature of resource limitations in Solana development. -There are, of course, limitations in a typical development environment, but -there are limitations unique to blockchain and Solana development such as how -much data can be stored in an account, the cost to store that data, and how many -compute units are available per transaction. You, the program designer, have to -be mindful of these limitations to create programs that are affordable, fast, -safe, and functional. Today we will be delving into some of the more advance -considerations that should be taken when creating Solana programs. +[security course lesson](/content/courses/program-security/security-intro.md), +but it's important to note the nature of resource limitations in Solana +development. There are, of course, limitations in a typical development +environment, but there are limitations unique to blockchain and Solana +development such as how much data can be stored in an account, the cost to store +that data, and how many compute units are available per transaction. You, the +program designer, have to be mindful of these limitations to create programs +that are affordable, fast, safe, and functional. Today we will be delving into +some of the more advanced considerations that should be taken when creating +Solana programs. ### Dealing With Large Accounts In modern application programming, we don't often have to think about the size -of the data structures we are using. You want to make a string? You can put a -4000 character limit on it if you want to avoid abuse, but it's probably not an +of the data structures we are using. Do you want to make a string? You can put a +4000-character limit on it if you want to avoid abuse, but it's probably not an issue. Want an integer? They're pretty much always 32-bit for convenience. 
-In high level languages, you are in the data-land-o-plenty! Now, in Solana land, -we pay per byte stored (rent) and have limits on heap, stack and account sizes. +In high-level languages, you are in the data-land-o-plenty! Now, in Solana land, +we pay per byte stored (rent) and have limits on heap, stack, and account sizes. We have to be a little more crafty with our bytes. There are two main concerns we are going to be looking at in this section: @@ -61,31 +62,32 @@ we are going to be looking at in this section: introduce you to the concept of data sizes here. 2. When operating on larger data, we run into - [Stack](https://solana.com/docs/onchain-programs/faq#stack) and - [Heap](https://solana.com/docs/onchain-programs/faq#heap-size) constraints - - to get around these, we'll look at using Box and Zero-Copy. + [Stack](https://solana.com/docs/programs/faq#stack) and + [Heap](https://solana.com/docs/programs/faq#heap-size) constraints - to get + around these, we'll look at using Box and Zero-Copy. #### Sizes -In Solana a transaction's fee payer pays for each byte stored onchain. We call -this [rent](https://solana.com/docs/core/fees). - -rent is a bit of a misnomer since it never actually gets -permanently taken. Once you deposit rent into the account, that data can stay -there forever or you can get refunded the rent if you close the account. Rent -used to be an actual thing, but now there's an enforced minimum rent exemption. -You can read about it in -[the Solana documentation](https://solana.com/docs/intro/rent). - -Rent etymology aside, putting data on the blockchain can be expensive. It's why -NFT attributes and associated files, like the image, are stored offchain. You -ultimately want to strike a balance that leaves your program highly functional -without becoming so expensive that your users don't want to pay to open the data -account. 
- -The first thing you need to know before you can start optimizing for space in -your program is the size of each of your structs. Below is a very helpful list -from the +In Solana, a transaction's fee payer pays for each byte stored onchain. This is +called [rent](https://solana.com/docs/core/fees#rent). + + + +Rent is a bit of a misnomer since it never gets permanently taken. Once you +deposit rent into the account, that data can stay there forever, or you can get +refunded the rent if you close the account. Previously, rent was paid in +intervals, similar to traditional rent, but now there's an enforced minimum +balance for rent exemption. You can read more about it in +[the Solana documentation](https://solana.com/docs/core/fees#rent-exempt). + + +Putting data on the blockchain can be expensive, which is why NFT attributes and +associated files, like images, are stored offchain. The goal is to strike a +balance between keeping your program highly functional and ensuring that users +aren't discouraged by the cost of storing data onchain. + +The first step in optimizing for space in your program is understanding the size +of your structs. Below is a helpful reference from the [Anchor Book](https://book.anchor-lang.com/anchor_references/space.html). @@ -110,7 +112,7 @@ from the Knowing these, start thinking about little optimizations you might take in a program. For example, if you have an integer field that will only ever reach 100, don't use a u64/i64, use a u8. Why? Because a u64 takes up 8 bytes, with a -max value of 2^64 or 1.84 \* 10^19. Thats a waste of space since you only need +max value of 2^64 or 1.84 \* 10^19. That's a waste of space since you only need to accommodate numbers up to 100. A single byte will give you a max value of 255 which, in this case, would be sufficient. Similarly, there's no reason to use i8 if you'll never have negative numbers. 
@@ -158,7 +160,9 @@ where that entire `SomeBigDataStruct` gets stored in memory and since 5000
 bytes, or 5KB, is greater than the 4KB limit, it will throw a stack error. So
 how do we fix this?
 
-The answer is the **`Box`** type!
+The answer is the
+[**`Box`**](https://docs.rs/anchor-lang/latest/anchor_lang/accounts/boxed/index.html)
+type!
 
 ```rust
 #[account]
@@ -175,14 +179,14 @@ pub struct SomeFunctionContext<'info> {
 
 In Anchor, **`Box`** is used to allocate the account to the Heap, not the
 Stack. Which is great since the Heap gives us 32KB to work with. The best part
 is you don't have to do anything different within the function. All you need to
-do is add `Box<...>` around all of your big data accounts.
+do is add `Box<...>` around all of your big data accounts.
 
 But Box is not perfect. You can still overflow the stack with sufficiently
 large accounts. We'll learn how to fix this in the next section.
 
 #### Zero Copy
 
-Okay, so now you can deal with medium sized accounts using `Box`. But what if
+Okay, so now you can deal with medium-sized accounts using `Box`. But what if
 you need to use really big accounts like the max size of 10MB? Take the
 following as an example:
 
@@ -220,13 +224,13 @@ To understand what's happening here, take a look at the
 [rust Anchor documentation](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html)
 
 > Other than being more efficient, the most salient benefit [`zero_copy`]
-> provides is the ability to define account types larger than the max stack or
-> heap size. When using borsh, the account has to be copied and deserialized
-> into a new data structure and thus is constrained by stack and heap limits
-> imposed by the BPF VM. With zero copy deserialization, all bytes from the
-> account's backing `RefCell<&mut [u8]>` are simply re-interpreted as a
-> reference to the data structure. No allocations or copies necessary. Hence the
-> ability to get around stack and heap limitations.
+> provides is the ability to define account types larger than the max stack or heap
+> size. When using borsh, the account has to be copied and deserialized into a
+> new data structure and thus is constrained by stack and heap limits imposed by
+> the BPF VM. With zero copy deserialization, all bytes from the account's
+> backing `RefCell<&mut [u8]>` are simply re-interpreted as a reference to the
+> data structure. No allocations or copies are necessary. Hence the ability to
+> get around stack and heap limitations.
 
 Basically, your program never actually loads zero-copy account data into the
 stack or heap. It instead gets pointer access to the raw data. The
@@ -244,7 +248,7 @@ pub struct ConceptZeroCopy<'info> {
 }
 ```
 
-Instead, your client has to create the large account and pay for it's rent in a
+Instead, your client has to create a large account and pay for its rent in a
 separate instruction.
 
 ```typescript
@@ -271,8 +275,8 @@ const txHash = await program.methods
   .rpc();
 ```
 
-The second caveat is that your'll have to call one of the following methods from
-inside your rust instruction function to load the account:
+The second caveat is that you'll have to call one of the following methods from
+inside your rust instruction handler to load the account:
 
 - `load_init` when first initializing an account (this will ignore the missing
   account discriminator that gets added only after the user's instruction code)
@@ -289,7 +293,7 @@ let some_really_big_data = &mut ctx.accounts.some_really_big_data.load_init()?;
 
 After you do that, then you can treat the account like normal! Go ahead and
 experiment with this in the code yourself to see everything in action! 
-For a better understanding on how this all works, Solana put together a really
+For a better understanding of how this all works, Solana put together a really
 nice [video](https://www.youtube.com/watch?v=zs_yU0IuJxc&feature=youtu.be) and
 [code](https://github.com/solana-developers/anchor-zero-copy-example) explaining
 Box and Zero-Copy in vanilla Solana.
 
@@ -297,8 +301,8 @@ Box and Zero-Copy in vanilla Solana.
 ### Dealing with Accounts
 
 Now that you know the nuts and bolts of space consideration on Solana, let's
-look at some higher level considerations. In Solana, everything is an account,
-so for the next couple sections we'll look at some account architecture
+look at some higher-level considerations. In Solana, everything is an account,
+so for the next couple sections, we'll look at some account architecture
 concepts.
 
 #### Data Order
@@ -321,16 +325,16 @@ the location of `id` on the memory map.
 
 To make this more clear, observe what this account's data looks like onchain
 when `flags` has four items in the vector vs eight items. If you were to call
 `solana account ACCOUNT_KEY` you'd get a data dump like the following:
 
 ```rust
 0000: 74 e4 28 4e d9 ec 31 0a -> Account Discriminator (8)
-0008: 04 00 00 00 11 22 33 44 -> Vec Size (4) | Data 4*(1)
+0008: 04 00 00 00 11 22 33 44 -> Vec Size (4) | Data 4*(1)
 0010: DE AD BE EF -> id (4)
 
 --- vs ---
 
 0000: 74 e4 28 4e d9 ec 31 0a -> Account Discriminator (8)
-0008: 08 00 00 00 11 22 33 44 -> Vec Size (8) | Data 4*(1)
+0008: 08 00 00 00 11 22 33 44 -> Vec Size (8) | Data 4*(1)
 0010: 55 66 77 88 DE AD BE EF -> Data 4*(1) | id (4)
 ```
 
@@ -344,11 +349,10 @@ the data in the `flags` field took up four more bytes.
 
 The main problem with this is lookup. When you query Solana, you use filters
 that look at the raw data of an account. These are called a `memcmp` filters, or
 memory compare filters. 
You give the filter an `offset` and `bytes`, and the
-filter then looks directly at the memory, offsetting from the start by the
-`offset` you provide, and compares the bytes in memory to the `bytes` you
-provide.
+filter then looks directly at the memory, offset from the start by the `offset`
+you provide, and compares the bytes in memory to the `bytes` you provide.
 
-For example, you know that the `flags` struct will always start at address
+For example, you know that the `flags` struct will always start at the address
 0x0008 since the first 8 bytes contain the account discriminator. Querying all
 accounts where the `flags` length is equal to four is possible because we _know_
 that the four bytes at 0x0008 represent the length of the data in `flags`. Since
@@ -368,12 +372,12 @@ const states = await program.account.badState.all([
 
 However, if you wanted to query by the `id`, you wouldn't know what to put for
 the `offset` since the location of `id` is variable based on the length of
 `flags`. That doesn't seem very helpful. IDs are usually there to help with
 queries! The simple fix is to flip the order.
 
 ```rust
-#[account] // Anchor hides the account disriminator
+#[account] // Anchor hides the account discriminator
 pub struct GoodState {
-    pub id: u32 // 0xDEAD_BEEF
+    pub id: u32 // 0xDEAD_BEEF
     pub flags: Vec, // 0x11, 0x22, 0x33 ...
 }
 ```
 
@@ -383,83 +388,229 @@ accounts based on all the fields up to the first variable length field. To echo
 the beginning of this section: As a rule of thumb, keep all variable length
 structs at the end of the account.
 
-#### For Future Use
+#### Account Flexibility and Future-Proofing
 
-In certain cases, consider adding extra, unused bytes to you accounts. These are
-held in reserve for flexibility and backward compatibility. 
Take the following -example: +When developing Solana programs, it's crucial to design your account structures +with future upgrades and backward compatibility in mind. Solana offers powerful +features like account resizing and Anchor's `InitSpace` attribute to handle +these challenges efficiently. Let's explore a more dynamic and flexible approach +using a game state example: ```rust +use anchor_lang::prelude::*; + #[account] -pub struct GameState { +#[derive(InitSpace)] +pub struct GameState { // V1 + pub version: u8, pub health: u64, pub mana: u64, - pub event_log: Vec + pub experience: Option, + #[max_len(50)] + pub event_log: Vec } ``` -In this simple game state, a character has `health`, `mana`, and an event log. -If at some point you are making game improvements and want to add an -`experience` field, you'd hit a snag. The `experience` field should be a number -like a `u64`, which is simple enough to add. You can -[reallocate the account](/developers/courses/onchain-development/anchor-pdas) -and add space. - -However, to keep dynamic length fields, like `event_log`, at the end of the -struct, you would need to do some memory manipulation on all reallocated -accounts to move the location of `event_log`. This can be complicated and makes -querying accounts far more difficult. You'll end up in a state where -non-migrated accounts have `event_log` in one location and migrated accounts in -another. The old `GameState` without `experience` and the new `GameState` with -`experience` in it are no longer compatible. Old accounts won't serialize when -used where new accounts are expected. Queries will be far more difficult. You'll -likely need to create a migration system and ongoing logic to maintain backward -compatibility. Ultimately, it begins to seem like a bad idea. - -Fortunately, if you think ahead, you can add a `for_future_use` field that -reserves some bytes where you expect to need them most. 
+In this GameState, we have: + +- A `version` field to track account structure changes +- Basic character attributes (`health`, `mana`) +- An `experience` field as `Option` for backward compatibility +- An `event_log` with a specified maximum length + +Key advantages of this approach: + +1. **Automatic Space Calculation**: The `InitSpace` attribute automatically + calculates the required account space. +2. **Versioning**: The `version` field allows for easy identification of account + structure versions. +3. **Flexible Fields**: Using `Option` for new fields maintains compatibility + with older versions. +4. **Defined Limits**: The `max_len` attribute on `Vec` fields clearly + communicates size constraints. + +When you need to upgrade your account structure, such as increasing the length +of `event_log` or adding new fields, you can use a single upgrade instruction +with Anchor's `realloc` constraint: + +1. Update the `GameState` struct with new fields or increased `max_len` + attributes: + + ```rust + #[account] + #[derive(InitSpace)] + pub struct GameState { + pub version: u8, + pub health: u64, + pub mana: u64, + pub experience: Option, + #[max_len(100)] // Increased from 50 + pub event_log: Vec, + pub new_field: Option, // Added new field + } + ``` + +2. Use a single `UpgradeGameState` context for all upgrades with Anchor's + `realloc` constraint for `GameState`: + + ```rust + #[derive(Accounts)] + pub struct UpgradeGameState<'info> { + #[account( + mut, + realloc = GameState::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + pub game_state: Account<'info, GameState>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +3. 
Implement the upgrade logic in a single function: + + ```rust + pub fn upgrade_game_state(ctx: Context) -> Result<()> { + let game_state = &mut ctx.accounts.game_state; + + match game_state.version { + 1 => { + game_state.version = 2; + game_state.experience = Some(0); + msg!("Upgraded to version 2"); + }, + 2 => { + game_state.version = 3; + game_state.new_field = Some(0); + msg!("Upgraded to version 3"); + }, + _ => return Err(ErrorCode::AlreadyUpgraded.into()), + } + + Ok(()) + } + ``` + +The example to demonstrate this approach: ```rust +use anchor_lang::prelude::*; + #[account] -pub struct GameState { //V1 +#[derive(InitSpace)] +pub struct GameState { + pub version: u8, pub health: u64, pub mana: u64, - pub for_future_use: [u8; 128], - pub event_log: Vec + pub experience: Option, + #[max_len(100)] // Increased from 50 + pub event_log: Vec, + pub new_field: Option, } -``` -That way, when you go to add `experience` or something similar, it looks like -this and both the old and new accounts are compatible. +#[derive(Accounts)] +pub struct UpgradeGameState<'info> { + #[account( + mut, + realloc = GameState::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + pub game_state: Account<'info, GameState>, + #[account(mut)] + pub payer: Signer<'info>, + pub system_program: Program<'info, System>, +} -```rust -#[account] -pub struct GameState { //V2 - pub health: u64, - pub mana: u64, - pub experience: u64, - pub for_future_use: [u8; 120], - pub event_log: Vec +#[program] +pub mod your_program { + use super::*; + + // ... other instructions ... 
+ + pub fn upgrade_game_state(ctx: Context) -> Result<()> { + let game_state = &mut ctx.accounts.game_state; + + match game_state.version { + 1 => { + game_state.version = 2; + game_state.experience = Some(0); + msg!("Upgraded to version 2"); + }, + 2 => { + game_state.version = 3; + game_state.new_field = Some(0); + msg!("Upgraded to version 3"); + }, + _ => return Err(ErrorCode::AlreadyUpgraded.into()), + } + + Ok(()) + } +} + +#[error_code] +pub enum ErrorCode { + #[msg("Account is already at the latest version")] + AlreadyUpgraded, } ``` -These extra bytes do add to the cost of using your program. However, it seems -well worth the benefit in most cases. +This approach: + +- Uses the Anchor's + [`realloc`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.Accounts.html#normal-constraints) + constraint to automatically handle account resizing. +- The + [`InitSpace`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.InitSpace.html) + derive macro automatically implements the `Space` trait for the `GameState` + struct. This trait includes the + [`INIT_SPACE`](https://docs.rs/anchor-lang/latest/anchor_lang/trait.Space.html#associatedconstant.INIT_SPACE) + associated constant , which calculates the total space required for the + account. +- Designates a payer for any additional rent with `realloc::payer = payer`. +- Keeps existing data with `realloc::zero = false`. + + + +Account data can be increased within a single call by up to +`solana_program::entrypoint::MAX_PERMITTED_DATA_INCREASE` bytes. -So as a general rule of thumb: anytime you think your account types have the -potential to change in a way that will require some kind of complex migration, -add in some `for_future_use` bytes. +Memory used to grow is already zero-initialized upon program entrypoint and +re-zeroing it wastes compute units. If within the same call a program reallocs +from larger to smaller and back to larger again the new space could contain +stale data. 
Pass `true` for `zero_init` in this case, otherwise compute units +will be wasted re-zero-initializing. + + + +While account resizing is powerful, use it judiciously. Consider the trade-offs +between frequent resizing and initial allocation based on your specific use case +and expected growth patterns. + +- Always ensure your account remains rent-exempt before resizing. +- The payer of the transaction is responsible for providing the additional + lamports. +- Consider the cost implications of frequent resizing in your program design. + + +In native Rust, you can resize accounts using the `realloc()` method. For more +details, refer to the +[account resizing program](/content/cookbook/programs/change-account-size.md). #### Data Optimization The idea here is to be aware of wasted bits. For example, if you have a field that represents the month of the year, don't use a `u64`. There will only ever +that represents the month of the year, don't use a `u64`. There will only ever be 12 months. Use a `u8`. Better yet, use a `u8` Enum and label the months. To get even more aggressive on bit savings, be careful with booleans. Look at the below struct composed of eight boolean flags. While a boolean _can_ be represented as a single bit, borsh deserialization will allocate an entire byte -to each of these fields. that means that eight booleans winds up being eight +to each of these fields. That means that eight booleans wind up being eight bytes instead of eight bits, an eight times increase in size. ```rust @@ -536,6 +687,10 @@ Depending on the seeding you can create all sorts of relationships: program. For example, if your program needs a lookup table, you could seed it with `seeds=[b"Lookup"]`. Just be careful to provide appropriate access restrictions. +- One-Per-Owner - Say you're creating a video game player account and you only + want one player account per wallet. Then you'd seed the account with + `seeds=[b"PLAYER", owner.key().as_ref()]`. 
This way, you'll always know where + to look for a wallet's player account **and** there can only ever be one of - One-Per-Owner - Say you're creating a video game player account and you only want one player account per wallet. Then you'd seed the account with `seeds=[b"PLAYER", owner.key().as_ref()]`. This way, you'll always know where @@ -583,7 +738,7 @@ seeds=[b"Podcast", channel_account.key().as_ref(), episode_number.to_be_bytes(). You can always find the channel account for a particular owner. And since the channel stores the number of episodes created, you always know the upper bound -of where to search for queries. Additionally you always know what index to +of where to search for queries. Additionally, you always know what index to create a new episode at: `index = episodes_created`. ```rust @@ -599,24 +754,24 @@ Podcast X: seeds=[b"Podcast", channel_account.key().as_ref(), X.to_be_bytes().as One of the main reasons to choose Solana for your blockchain environment is its parallel transaction execution. That is, Solana can run transactions in parallel as long as those transactions aren't trying to write data to the same account. -This improves program throughput out of the box, but with some proper planning +This improves program throughput out of the box, but with some proper planning, you can avoid concurrency issues and really boost your program's performance. #### Shared Accounts If you've been around crypto for a while, you may have experienced a big NFT -mint event. A new NFT project is coming out, everyone is really excited for it, -and then the candymachine goes live. It's a mad dash to click +mint event. A new NFT project is coming out, everyone is really excited about +it, and then the candymachine goes live. It's a mad dash to click `accept transaction` as fast as you can. If you were clever, you may have -written a bot to enter in the transactions faster that the website's UI could. -This mad rush to mint creates a lot of failed transactions. 
But why? Because -everyone is trying to write data to the same Candy Machine account. +written a bot to enter the transactions faster than the website's UI could. This +mad rush to mint creates a lot of failed transactions. But why? Because everyone +is trying to write data to the same Candy Machine account. Take a look at a simple example: Alice and Bob are trying to pay their friends Carol and Dean respectively. All -four accounts change, but neither depend on each other. Both transactions can -run at the same time. +four accounts change, but neither depends on other. Both transactions can run at +the same time. ```rust Alice -- pays --> Carol @@ -629,18 +784,18 @@ issues. ```rust Alice -- pays --> | - -- > Carol + -- > Carol Bob -- pays --- | ``` Since both of these transactions write to Carol's token account, only one of -them can go through at a time. Fortunately, Solana is wicked fast, so it'll +them can go through at a time. Fortunately, Solana is very fast, so it'll probably seem like they get paid at the same time. But what happens if more than just Alice and Bob try to pay Carol? ```rust Alice -- pays --> | - -- > Carol + -- > Carol x1000 -- pays --- | Bob -- pays --- | ``` @@ -675,7 +830,7 @@ pub struct DonationTally { } ``` -First let's look at the suboptimal solution. +First, let's look at the suboptimal solution. ```rust pub fn run_concept_shared_account_bottleneck(ctx: Context, lamports_to_donate: u64) -> Result<()> { @@ -708,7 +863,7 @@ pub fn run_concept_shared_account_bottleneck(ctx: ContextThis lab was created with Anchor version `0.28.0` in mind. -If there are problems compiling, please refer to the -[solution code](https://github.com/Unboxed-Software/anchor-rpg/tree/challenge-solution) -for the environment setup. + + +This lab was created with Anchor version `0.30.1` in mind. If there are problems +compiling, please refer to the +[solution code](https://github.com/solana-developers/anchor-rpg/tree/main) for +the environment setup. 
-Next, replace the program ID in `programs/rpg/lib.rs` and `Anchor.toml` with the -program ID shown when you run `anchor keys list`. +Next, run the command `anchor keys sync` that will automatically sync your +program ID. This command updates the program IDs in your program files +(including `Anchor.toml`) with the actual `pubkey` from the program keypair +file. -Finally, let's scaffold out the program in the `lib.rs` file. To make following -along easier, we're going to keep everything in one file. We'll augment this -with section comments for better organization and navigation. Copy the following +Finally, let's scaffold out the program in the `lib.rs` file. Copy the following into your file before we get started: -```rust +```rust filename="lib.rs" use anchor_lang::prelude::*; -use anchor_lang::system_program::{Transfer, transfer}; use anchor_lang::solana_program::log::sol_log_compute_units; declare_id!("YOUR_KEY_HERE__YOUR_KEY_HERE"); -// ----------- ACCOUNTS ---------- - -// ----------- GAME CONFIG ---------- - -// ----------- STATUS ---------- - -// ----------- INVENTORY ---------- - -// ----------- HELPER ---------- - -// ----------- CREATE GAME ---------- - -// ----------- CREATE PLAYER ---------- +#[program] +pub mod rpg { + use super::*; -// ----------- SPAWN MONSTER ---------- + pub fn create_game(ctx: Context, max_items_per_player: u8) -> Result<()> { + run_create_game(ctx, max_items_per_player)?; + sol_log_compute_units(); + Ok(()) + } -// ----------- ATTACK MONSTER ---------- + pub fn create_player(ctx: Context) -> Result<()> { + run_create_player(ctx)?; + sol_log_compute_units(); + Ok(()) + } -// ----------- REDEEM TO TREASURY ---------- + pub fn spawn_monster(ctx: Context) -> Result<()> { + run_spawn_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } -#[program] -pub mod rpg { - use super::*; + pub fn attack_monster(ctx: Context) -> Result<()> { + run_attack_monster(ctx)?; + sol_log_compute_units(); + Ok(()) + } + pub fn 
deposit_action_points(ctx: Context) -> Result<()> { + run_collect_action_points(ctx)?; + sol_log_compute_units(); + Ok(()) + } } ``` -#### 2. Create Account Structures +### 2. Create Account Structures Now that our initial setup is ready, let's create our accounts. We'll have 3: @@ -903,7 +1067,6 @@ Now that our initial setup is ready, let's create our accounts. We'll have 3: - `experience` - the player's experience - `kills` - number of monsters killed - `next_monster_index` - the index of the next monster to face - - `for_future_use` - 256 bytes reserved for future use - `inventory` - a vector of the player's inventory 3. `Monster` - A PDA account whose address is derived using the game account address, the player's wallet address, and an index (the one stored as @@ -912,21 +1075,55 @@ Now that our initial setup is ready, let's create our accounts. We'll have 3: - `game` - the game the monster is associated with - `hitpoints` - how many hit points the monster has left +This is the final project structure: + +```bash +src/ +├── constants.rs # Constants used throughout the program +├── error/ # Error module +│ ├── errors.rs # Custom error definitions +│ └── mod.rs # Module declarations for error handling +├── helpers.rs # Helper functions used across the program +├── instructions/ # Instruction handlers for different game actions +│ ├── attack_monster.rs # Handles attacking a monster +│ ├── collect_points.rs # Handles collecting points +│ ├── create_game.rs # Handles game creation +│ ├── create_player.rs # Handles player creation +│ ├── mod.rs # Module declarations for instructions +│ └── spawn_monster.rs # Handles spawning a new monster +├── lib.rs # Main entry point for the program +└── state/ # State module for game data structures + ├── game.rs # Game state representation + ├── mod.rs # Module declarations for state + ├── monster.rs # Monster state representation + └── player.rs # Player state representation +``` + When added to the program, the accounts should 
look like this: ```rust // ----------- ACCOUNTS ---------- -#[account] -pub struct Game { // 8 bytes - pub game_master: Pubkey, // 32 bytes - pub treasury: Pubkey, // 32 bytes - - pub action_points_collected: u64, // 8 bytes +// Inside `state/game.rs` +use anchor_lang::prelude::*; +#[account] +#[derive(InitSpace)] +pub struct Game { + pub game_master: Pubkey, + pub treasury: Pubkey, + pub action_points_collected: u64, pub game_config: GameConfig, } +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct GameConfig { + pub max_items_per_player: u8 +} + +// Inside `state/player.rs` +use anchor_lang::prelude::*; #[account] +#[derive(InitSpace)] pub struct Player { // 8 bytes pub player: Pubkey, // 32 bytes pub game: Pubkey, // 32 bytes @@ -939,92 +1136,125 @@ pub struct Player { // 8 bytes pub kills: u64, // 8 bytes pub next_monster_index: u64, // 8 bytes - pub for_future_use: [u8; 256], // Attack/Speed/Defense/Health/Mana?? Metadata?? - pub inventory: Vec, // Max 8 items } -#[account] -pub struct Monster { // 8 bytes - pub player: Pubkey, // 32 bytes - pub game: Pubkey, // 32 bytes +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] +pub struct InventoryItem { + pub name: [u8; 32], // Fixed Name up to 32 bytes + pub amount: u64 +} + - pub hitpoints: u64, // 8 bytes +// Inside `state/monster.rs` +use anchor_lang::prelude::*; +#[account] +#[derive(InitSpace)] +pub struct Monster { + pub player: Pubkey, + pub game: Pubkey, + pub hitpoints: u64, } ``` There aren't a lot of complicated design decisions here, but let's talk about -the `inventory` and `for_future_use` fields on the `Player` struct. Since -`inventory` is variable in length we decided to place it at the end of the -account to make querying easier. We've also decided it's worth spending a little -extra money on rent exemption to have 256 bytes of reserved space in the -`for_future_use` field. 
We could exclude this and simply reallocate accounts if -we need to add fields in the future, but adding it now simplifies things for us -in the future. - -If we chose to reallocate in the future, we'd need to write more complicated -queries and likely couldn't query in a single call based on `inventory`. -Reallocating and adding a field would move the memory position of `inventory`, -leaving us to write complex logic to query accounts with various structures. +the `inventory` field on the `Player` struct. Since `inventory` is variable in +length we decided to place it at the end of the account to make querying easier. -#### 3. Create ancillary types +### 3. Create Ancillary Types The next thing we need to do is add some of the types our accounts reference that we haven't created yet. Let's start with the game config struct. Technically, this could have gone in the `Game` account, but it's nice to have some separation and encapsulation. -This struct should store the max items allowed per player and some bytes for -future use. Again, the bytes for future use here help us avoid complexity in the -future. Reallocating accounts works best when you're adding fields at the end of -an account rather than in the middle. If you anticipate adding fields in the -middle of existing date, it might make sense to add some "future use" bytes up -front. +This struct should store the max items allowed per player. -```rust +```rust filename="game.rs" // ----------- GAME CONFIG ---------- - -#[derive(Clone, AnchorSerialize, AnchorDeserialize)] +// Inside `state/game.rs` +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] pub struct GameConfig { - pub max_items_per_player: u8, - pub for_future_use: [u64; 16], // Health of Enemies?? Experience per item?? Action Points per Action?? 
+ pub max_items_per_player: u8 } ``` +Reallocating accounts in Solana programs has become more flexible due to +Anchor's +[`realloc`](https://docs.rs/anchor-lang/latest/anchor_lang/derive.Accounts.html#normal-constraints) +account constraint and Solana's account resizing capabilities. While adding +fields at the end of an account structure remains straightforward, modern +practices allow for more adaptable designs: + +1. Use Anchor's `realloc` constraint in the `#[account()]` attribute to specify + resizing parameters: + + ```rust + #[account( + mut, + realloc = AccountStruct::INIT_SPACE, + realloc::payer = payer, + realloc::zero = false, + )] + ``` + +2. Use Anchor's `InitSpace` attribute to automatically calculate account space. +3. For variable-length fields like `Vec` or `String`, use the `max_len` + attribute to specify maximum size. +4. When adding new fields, consider using `Option` for backward + compatibility. +5. Implement a versioning system in your account structure to manage different + layouts. +6. Ensure the payer account is mutable and a signer to cover reallocation costs: + + ```rust + #[account(mut)] + pub payer: Signer<'info>, + ``` + +This approach allows for easier account structure evolution, regardless of where +new fields are added, while maintaining efficient querying and +serialization/deserialization through Anchor's built-in capabilities. It enables +resizing accounts as needed, automatically handling rent-exemption. + Next, let's create our status flags. Remember, we _could_ store our flags as booleans but we save space by storing multiple flags in a single byte. Each flag takes up a different bit within the byte. We can use the `<<` operator to place `1` in the correct bit. 
-```rust +```rust filename="constants.rs" // ----------- STATUS ---------- -const IS_FROZEN_FLAG: u8 = 1 << 0; -const IS_POISONED_FLAG: u8 = 1 << 1; -const IS_BURNING_FLAG: u8 = 1 << 2; -const IS_BLESSED_FLAG: u8 = 1 << 3; -const IS_CURSED_FLAG: u8 = 1 << 4; -const IS_STUNNED_FLAG: u8 = 1 << 5; -const IS_SLOWED_FLAG: u8 = 1 << 6; -const IS_BLEEDING_FLAG: u8 = 1 << 7; -const NO_EFFECT_FLAG: u8 = 0b00000000; +pub const IS_FROZEN_FLAG: u8 = 1 << 0; +pub const IS_POISONED_FLAG: u8 = 1 << 1; +pub const IS_BURNING_FLAG: u8 = 1 << 2; +pub const IS_BLESSED_FLAG: u8 = 1 << 3; +pub const IS_CURSED_FLAG: u8 = 1 << 4; +pub const IS_STUNNED_FLAG: u8 = 1 << 5; +pub const IS_SLOWED_FLAG: u8 = 1 << 6; +pub const IS_BLEEDING_FLAG: u8 = 1 << 7; + +pub const NO_EFFECT_FLAG: u8 = 0b00000000; +pub const ANCHOR_DISCRIMINATOR: usize = 8; +pub const MAX_INVENTORY_ITEMS: usize = 8; ``` Finally, let's create our `InventoryItem`. This should have fields for the -item's name, amount, and some bytes reserved for future use. +item's name and amount. -```rust +```rust filename="player.rs" // ----------- INVENTORY ---------- -#[derive(Clone, AnchorSerialize, AnchorDeserialize)] +// Inside `state/player.rs` +#[derive(AnchorSerialize, AnchorDeserialize, Clone, InitSpace)] pub struct InventoryItem { pub name: [u8; 32], // Fixed Name up to 32 bytes - pub amount: u64, - pub for_future_use: [u8; 128], // Metadata?? // Effects // Flags? + pub amount: u64 } + ``` -#### 4. Create helper function for spending action points +### 4. Create a helper function for spending action points The last thing we'll do before writing the program's instructions is create a helper function for spending action points. Players will send action points @@ -1040,26 +1270,40 @@ that will send the lamports from that account to the treasury in one fell swoop. This alleviates any concurrency issues since every player has their own account, but also allows the program to retrieve those lamports at any time. 
-```rust +```rust filename="helper.rs" // ----------- HELPER ---------- +// Inside /src/helpers.rs +use anchor_lang::{prelude::*, system_program}; + +use crate::{error::RpgError, Player}; + pub fn spend_action_points<'info>( action_points: u64, player_account: &mut Account<'info, Player>, player: &AccountInfo<'info>, system_program: &AccountInfo<'info>, ) -> Result<()> { - - player_account.action_points_spent = player_account.action_points_spent.checked_add(action_points).unwrap(); - player_account.action_points_to_be_collected = player_account.action_points_to_be_collected.checked_add(action_points).unwrap(); - - let cpi_context = CpiContext::new( - system_program.clone(), - Transfer { - from: player.clone(), - to: player_account.to_account_info().clone(), - }); - transfer(cpi_context, action_points)?; + player_account.action_points_spent = player_account + .action_points_spent + .checked_add(action_points) + .ok_or(error!(RpgError::ArithmeticOverflow))?; + + player_account.action_points_to_be_collected = player_account + .action_points_to_be_collected + .checked_add(action_points) + .ok_or(error!(RpgError::ArithmeticOverflow))?; + + system_program::transfer( + CpiContext::new( + system_program.to_account_info(), + system_program::Transfer { + from: player.to_account_info(), + to: player_account.to_account_info(), + }, + ), + action_points, + )?; msg!("Minus {} action points", action_points); @@ -1067,7 +1311,7 @@ pub fn spend_action_points<'info>( } ``` -#### 5. Create Game +### 5. Create Game Our first instruction will create the `game` account. Anyone can be a `game_master` and create their own game, but once a game has been created there @@ -1077,48 +1321,54 @@ For one, the `game` account is a PDA using its `treasury` wallet. This ensures that the same `game_master` can run multiple games if they use a different treasury for each. -Also note that the `treasury` is a signer on the instruction. 
This is to make -sure whoever is creating the game has the private keys to the `treasury`. This -is a design decision rather than "the right way." Ultimately, it's a security -measure to ensure the game master will be able to retrieve their funds. + -```rust +The `treasury` is a signer on the instruction. This is to make sure whoever is +creating the game has the private keys to the `treasury`. This is a design +decision rather than "the right way." Ultimately, it's a security measure to +ensure the game master will be able to retrieve their funds. + +```rust filename="create_game.rs" // ----------- CREATE GAME ---------- +// Inside src/instructions/create_game.rs +use anchor_lang::prelude::*; + +use crate::{error::RpgError, Game, ANCHOR_DISCRIMINATOR}; + #[derive(Accounts)] pub struct CreateGame<'info> { #[account( init, - seeds=[b"GAME", treasury.key().as_ref()], + seeds = [b"GAME", treasury.key().as_ref()], bump, payer = game_master, - space = std::mem::size_of::()+ 8 + space = ANCHOR_DISCRIMINATOR + Game::INIT_SPACE )] pub game: Account<'info, Game>, - #[account(mut)] pub game_master: Signer<'info>, - - /// CHECK: Need to know they own the treasury pub treasury: Signer<'info>, pub system_program: Program<'info, System>, } pub fn run_create_game(ctx: Context, max_items_per_player: u8) -> Result<()> { + if max_items_per_player == 0 { + return Err(error!(RpgError::InvalidGameConfig)); + } - ctx.accounts.game.game_master = ctx.accounts.game_master.key().clone(); - ctx.accounts.game.treasury = ctx.accounts.treasury.key().clone(); - - ctx.accounts.game.action_points_collected = 0; - ctx.accounts.game.game_config.max_items_per_player = max_items_per_player; + let game = &mut ctx.accounts.game; + game.game_master = ctx.accounts.game_master.key(); + game.treasury = ctx.accounts.treasury.key(); + game.action_points_collected = 0; + game.game_config.max_items_per_player = max_items_per_player; msg!("Game created!"); - Ok(()) } ``` -#### 6. Create Player +### 6. 
Create Player Our second instruction will create the `player` account. There are three tradeoffs to note about this instruction: @@ -1133,62 +1383,67 @@ tradeoffs to note about this instruction: 100 lamports, but this could be something added to the game config in the future. -```rust +```rust filename="create_player.rs" // ----------- CREATE PLAYER ---------- + +// Inside src/instructions/create_player.rs +use anchor_lang::prelude::*; + +use crate::{ + error::RpgError, helpers::spend_action_points, Game, Player, ANCHOR_DISCRIMINATOR, + CREATE_PLAYER_ACTION_POINTS, NO_EFFECT_FLAG, +}; + #[derive(Accounts)] pub struct CreatePlayer<'info> { pub game: Box>, - #[account( init, - seeds=[ + seeds = [ b"PLAYER", game.key().as_ref(), player.key().as_ref() ], bump, payer = player, - space = std::mem::size_of::() + std::mem::size_of::() * game.game_config.max_items_per_player as usize + 8) - ] + space = ANCHOR_DISCRIMINATOR + Player::INIT_SPACE + )] pub player_account: Account<'info, Player>, - #[account(mut)] pub player: Signer<'info>, - pub system_program: Program<'info, System>, } pub fn run_create_player(ctx: Context) -> Result<()> { - - ctx.accounts.player_account.player = ctx.accounts.player.key().clone(); - ctx.accounts.player_account.game = ctx.accounts.game.key().clone(); - - ctx.accounts.player_account.status_flag = NO_EFFECT_FLAG; - ctx.accounts.player_account.experience = 0; - ctx.accounts.player_account.kills = 0; + let player_account = &mut ctx.accounts.player_account; + player_account.player = ctx.accounts.player.key(); + player_account.game = ctx.accounts.game.key(); + player_account.status_flag = NO_EFFECT_FLAG; + player_account.experience = 0; + player_account.kills = 0; msg!("Hero has entered the game!"); - { // Spend 100 lamports to create player - let action_points_to_spend = 100; + // Spend 100 lamports to create player + let action_points_to_spend = CREATE_PLAYER_ACTION_POINTS; - spend_action_points( - action_points_to_spend, - &mut 
ctx.accounts.player_account, - &ctx.accounts.player.to_account_info(), - &ctx.accounts.system_program.to_account_info() - )?; - } + spend_action_points( + action_points_to_spend, + player_account, + &ctx.accounts.player.to_account_info(), + &ctx.accounts.system_program.to_account_info(), + ) + .map_err(|_| error!(RpgError::InsufficientActionPoints))?; Ok(()) } ``` -#### 7. Spawn Monster +### 7. Spawn Monster Now that we have a way to create players, we need a way to spawn monsters for them to fight. This instruction will create a new `Monster` account whose -address is a PDA derived with the `game` account, `player` account, and an index +address is a PDA derived from the `game` account, `player` account, and an index representing the number of monsters the player has faced. There are two design decisions here we should talk about: @@ -1196,21 +1451,26 @@ decisions here we should talk about: 2. We wrap both the `game` and `player` accounts in `Box` to allocate them to the Heap -```rust +```rust filename="spawn_monster.rs" // ----------- SPAWN MONSTER ---------- + +// Inside src/instructions/spawn_monster.rs +use anchor_lang::prelude::*; + +use crate::{helpers::spend_action_points, Game, Monster, Player, SPAWN_MONSTER_ACTION_POINTS, ANCHOR_DISCRIMINATOR}; + #[derive(Accounts)] pub struct SpawnMonster<'info> { pub game: Box>, - - #[account(mut, + #[account( + mut, has_one = game, has_one = player, )] pub player_account: Box>, - #[account( init, - seeds=[ + seeds = [ b"MONSTER", game.key().as_ref(), player.key().as_ref(), @@ -1218,46 +1478,39 @@ pub struct SpawnMonster<'info> { ], bump, payer = player, - space = std::mem::size_of::() + 8) - ] + space = ANCHOR_DISCRIMINATOR + Monster::INIT_SPACE + )] pub monster: Account<'info, Monster>, - #[account(mut)] pub player: Signer<'info>, - pub system_program: Program<'info, System>, } pub fn run_spawn_monster(ctx: Context) -> Result<()> { + let monster = &mut ctx.accounts.monster; + monster.player = 
ctx.accounts.player.key(); + monster.game = ctx.accounts.game.key(); + monster.hitpoints = 100; - { - ctx.accounts.monster.player = ctx.accounts.player.key().clone(); - ctx.accounts.monster.game = ctx.accounts.game.key().clone(); - ctx.accounts.monster.hitpoints = 100; + let player_account = &mut ctx.accounts.player_account; + player_account.next_monster_index = player_account.next_monster_index.checked_add(1).unwrap(); - msg!("Monster Spawned!"); - } - - { - ctx.accounts.player_account.next_monster_index = ctx.accounts.player_account.next_monster_index.checked_add(1).unwrap(); - } + msg!("Monster Spawned!"); - { // Spend 5 lamports to spawn monster - let action_point_to_spend = 5; - - spend_action_points( - action_point_to_spend, - &mut ctx.accounts.player_account, - &ctx.accounts.player.to_account_info(), - &ctx.accounts.system_program.to_account_info() - )?; - } + // Spend 5 lamports to spawn monster + let action_point_to_spend = SPAWN_MONSTER_ACTION_POINTS; + spend_action_points( + action_point_to_spend, + player_account, + &ctx.accounts.player.to_account_info(), + &ctx.accounts.system_program.to_account_info(), + )?; Ok(()) } ``` -#### 8. Attack Monster +### 8. Attack Monster Now! Let's attack those monsters and start gaining some exp! @@ -1274,138 +1527,235 @@ The `saturating_add` function ensures the number will never overflow. Say the `kills` was a u8 and my current kill count was 255 (0xFF). If I killed another and added normally, e.g. `255 + 1 = 0 (0xFF + 0x01 = 0x00) = 0`, the kill count would end up as 0. `saturating_add` will keep it at its max if it's about to +would end up as 0. `saturating_add` will keep it at its max if it's about to roll over, so `255 + 1 = 255`. The `checked_add` function will throw an error if it's about to overflow. Keep this in mind when doing math in Rust. Even though `kills` is a u64 and will never roll with it's current programming, it's good +it's about to overflow. Keep this in mind when doing math in Rust. 
Even though +`kills` is a u64 and will never roll with it's current programming, it's good practice to use safe math and consider roll-overs. -```rust +```rust filename="attack_monster.rs" // ----------- ATTACK MONSTER ---------- + +// Inside src/instructions/attack_monster.rs +use anchor_lang::prelude::*; +use crate::{helpers::spend_action_points, Monster, Player, ATTACK_ACTION_POINTS, error::RpgError}; + #[derive(Accounts)] pub struct AttackMonster<'info> { - #[account( mut, has_one = player, )] pub player_account: Box>, - #[account( mut, has_one = player, - constraint = monster.game == player_account.game + constraint = monster.game == player_account.game @ RpgError::GameMismatch )] pub monster: Box>, - #[account(mut)] pub player: Signer<'info>, - pub system_program: Program<'info, System>, } pub fn run_attack_monster(ctx: Context) -> Result<()> { + let player_account = &mut ctx.accounts.player_account; + let monster = &mut ctx.accounts.monster; - let mut did_kill = false; - - { - let hp_before_attack = ctx.accounts.monster.hitpoints; - let hp_after_attack = ctx.accounts.monster.hitpoints.saturating_sub(1); - let damage_dealt = hp_before_attack - hp_after_attack; - ctx.accounts.monster.hitpoints = hp_after_attack; - + let hp_before_attack = monster.hitpoints; + let hp_after_attack = monster.hitpoints.saturating_sub(1); + let damage_dealt = hp_before_attack.saturating_sub(hp_after_attack); + monster.hitpoints = hp_after_attack; - - if hp_before_attack > 0 && hp_after_attack == 0 { - did_kill = true; - } - - if damage_dealt > 0 { - msg!("Damage Dealt: {}", damage_dealt); - } else { - msg!("Stop it's already dead!"); - } - } - - { - ctx.accounts.player_account.experience = ctx.accounts.player_account.experience.saturating_add(1); + if damage_dealt > 0 { + msg!("Damage Dealt: {}", damage_dealt); + player_account.experience = player_account.experience.saturating_add(1); msg!("+1 EXP"); - if did_kill { - ctx.accounts.player_account.kills = 
ctx.accounts.player_account.kills.saturating_add(1); + if hp_after_attack == 0 { + player_account.kills = player_account.kills.saturating_add(1); msg!("You killed the monster!"); } + } else { + msg!("Stop it's already dead!"); } - { // Spend 1 lamports to attack monster - let action_point_to_spend = 1; + // Spend 1 lamport to attack monster + let action_point_to_spend = ATTACK_ACTION_POINTS; - spend_action_points( - action_point_to_spend, - &mut ctx.accounts.player_account, - &ctx.accounts.player.to_account_info(), - &ctx.accounts.system_program.to_account_info() - )?; - } + spend_action_points( + action_point_to_spend, + player_account, + &ctx.accounts.player.to_account_info(), + &ctx.accounts.system_program.to_account_info() + )?; Ok(()) } ``` -#### Redeem to Treasury +### 9. Redeem to Treasury This is our last instruction. This instruction lets anyone send the spent `action_points` to the `treasury` wallet. Again, let's box the rpg accounts and use safe math. -```rust +```rust filename="collect_points.rs" // ----------- REDEEM TO TREASUREY ---------- + +// Inside src/instructions/collect_points.rs +use anchor_lang::prelude::*; +use crate::{error::RpgError, Game, Player}; + #[derive(Accounts)] pub struct CollectActionPoints<'info> { - #[account( mut, - has_one=treasury + has_one = treasury @ RpgError::InvalidTreasury )] pub game: Box>, - #[account( mut, - has_one=game + has_one = game @ RpgError::PlayerGameMismatch )] pub player: Box>, - #[account(mut)] /// CHECK: It's being checked in the game account - pub treasury: AccountInfo<'info>, - + pub treasury: UncheckedAccount<'info>, pub system_program: Program<'info, System>, } -// literally anyone who pays for the TX fee can run this command - give it to a clockwork bot +// Literally anyone who pays for the TX fee can run this command - give it to a clockwork bot pub fn run_collect_action_points(ctx: Context) -> Result<()> { - let transfer_amount: u64 = ctx.accounts.player.action_points_to_be_collected; + let 
transfer_amount = ctx.accounts.player.action_points_to_be_collected; + + // Transfer lamports from player to treasury + let player_info = ctx.accounts.player.to_account_info(); + let treasury_info = ctx.accounts.treasury.to_account_info(); + + **player_info.try_borrow_mut_lamports()? = player_info + .lamports() + .checked_sub(transfer_amount) + .ok_or(RpgError::InsufficientFunds)?; - **ctx.accounts.player.to_account_info().try_borrow_mut_lamports()? -= transfer_amount; - **ctx.accounts.treasury.to_account_info().try_borrow_mut_lamports()? += transfer_amount; + **treasury_info.try_borrow_mut_lamports()? = treasury_info + .lamports() + .checked_add(transfer_amount) + .ok_or(RpgError::ArithmeticOverflow)?; ctx.accounts.player.action_points_to_be_collected = 0; - ctx.accounts.game.action_points_collected = ctx.accounts.game.action_points_collected.checked_add(transfer_amount).unwrap(); + ctx.accounts.game.action_points_collected = ctx.accounts.game + .action_points_collected + .checked_add(transfer_amount) + .ok_or(RpgError::ArithmeticOverflow)?; - msg!("The treasury collected {} action points to treasury", transfer_amount); + msg!("The treasury collected {} action points", transfer_amount); Ok(()) } ``` -#### Putting it all Together +### 10. Error Handling + +Now, let's add all the errors that we have used till now in `errors.rs` file. 
+ +```rust filename="errors.rs" +// ------------RPG ERRORS-------------- + +// Inside src/error/errors.rs + +use anchor_lang::prelude::*; + +#[error_code] +pub enum RpgError { + #[msg("Arithmetic overflow occurred")] + ArithmeticOverflow, + #[msg("Invalid game configuration")] + InvalidGameConfig, + #[msg("Player not found")] + PlayerNotFound, + #[msg("Monster not found")] + MonsterNotFound, + #[msg("Insufficient action points")] + InsufficientActionPoints, + #[msg("Invalid attack")] + InvalidAttack, + #[msg("Maximum inventory size reached")] + MaxInventoryReached, + #[msg("Invalid item operation")] + InvalidItemOperation, + #[msg("Monster and player are not in the same game")] + GameMismatch, + #[msg("Invalid treasury account")] + InvalidTreasury, + #[msg("Player does not belong to the specified game")] + PlayerGameMismatch, + #[msg("Insufficient funds for transfer")] + InsufficientFunds +} +``` + +### 11. Module Declarations + +We need to declare all the modules used in the project as follows: + +```rust + +// Inside src/error/mod.rs +pub mod errors; +pub use errors::RpgError; // Expose the custom error type + +// Inside src/instructions/mod.rs +pub mod attack_monster; +pub mod collect_points; +pub mod create_game; +pub mod create_player; +pub mod spawn_monster; + +pub use attack_monster::*; // Expose attack_monster functions +pub use collect_points::*; // Expose collect_points functions +pub use create_game::*; // Expose create_game functions +pub use create_player::*; // Expose create_player functions +pub use spawn_monster::*; // Expose spawn_monster functions + +// Inside src/state/mod.rs +pub mod game; +pub mod monster; +pub mod player; + +pub use game::*; // Expose game state +pub use monster::*; // Expose monster state +pub use player::*; // Expose player state +``` + +### 12. Putting it all Together Now that all of our instruction logic is written, let's add these functions to actual instructions in the program. 
It can also be helpful to log compute units for each instruction. -```rust +```rust filename="lib.rs" + +// Insider src/lib.rs +use anchor_lang::prelude::*; +use anchor_lang::solana_program::log::sol_log_compute_units; + +mod state; +mod instructions; +mod constants; +mod helpers; +mod error; + +use state::*; +use constants::*; +use instructions::*; + +declare_id!("5Sc3gJv4tvPiFzE75boYMJabbNRs44zRhtT23fLdKewz"); + #[program] pub mod rpg { use super::*; @@ -1439,7 +1789,6 @@ pub mod rpg { sol_log_compute_units(); Ok(()) } - } ``` @@ -1450,73 +1799,134 @@ successfully. anchor build ``` -#### Testing +### Testing -Now, let's see this baby work! +Now, let's put everything together and see it in action! -Let's set up the `tests/rpg.ts` file. We will be filling out each test in turn. -But first, we needed to set up a couple of different accounts. Mainly the -`gameMaster` and the `treasury`. +We'll begin by setting up the `tests/rpg.ts` file. We will be writing each test +step by step. But before diving into the tests, we need to initialize a few +important accounts, specifically the `gameMaster` and the `treasury` accounts. -```typescript +```typescript filename="rpg.ts" import * as anchor from "@coral-xyz/anchor"; import { Program } from "@coral-xyz/anchor"; -import { Rpg, IDL } from "../target/types/rpg"; +import { Rpg } from "../target/types/rpg"; import { assert } from "chai"; +import { + Keypair, + LAMPORTS_PER_SOL, + PublicKey, + TransactionSignature, + TransactionConfirmationStrategy, +} from "@solana/web3.js"; import NodeWallet from "@coral-xyz/anchor/dist/cjs/nodewallet"; -describe("RPG", () => { - // Configure the client to use the local cluster. 
- anchor.setProvider(anchor.AnchorProvider.env()); - - const program = anchor.workspace.Rpg as Program; - const wallet = anchor.workspace.Rpg.provider.wallet - .payer as anchor.web3.Keypair; - const gameMaster = wallet; - const player = wallet; - - const treasury = anchor.web3.Keypair.generate(); - - it("Create Game", async () => {}); - - it("Create Player", async () => {}); - - it("Spawn Monster", async () => {}); - - it("Attack Monster", async () => {}); - - it("Deposit Action Points", async () => {}); +const GAME_SEED = "GAME"; +const PLAYER_SEED = "PLAYER"; +const MONSTER_SEED = "MONSTER"; +const MAX_ITEMS_PER_PLAYER = 8; +const INITIAL_MONSTER_HITPOINTS = 100; +const AIRDROP_AMOUNT = 10 * LAMPORTS_PER_SOL; +const CREATE_PLAYER_ACTION_POINTS = 100; +const SPAWN_MONSTER_ACTION_POINTS = 5; +const ATTACK_MONSTER_ACTION_POINTS = 1; +const MONSTER_INDEX_BYTE_LENGTH = 8; + +const provider = anchor.AnchorProvider.env(); +anchor.setProvider(provider); + +const program = anchor.workspace.Rpg as Program; +const wallet = provider.wallet as NodeWallet; +const gameMaster = wallet; +const player = wallet; + +const treasury = Keypair.generate(); + +const findProgramAddress = (seeds: Buffer[]): [PublicKey, number] => + PublicKey.findProgramAddressSync(seeds, program.programId); + +const confirmTransaction = async ( + signature: TransactionSignature, + provider: anchor.Provider, +) => { + const latestBlockhash = await provider.connection.getLatestBlockhash(); + const confirmationStrategy: TransactionConfirmationStrategy = { + signature, + blockhash: latestBlockhash.blockhash, + lastValidBlockHeight: latestBlockhash.lastValidBlockHeight, + }; + + try { + const confirmation = + await provider.connection.confirmTransaction(confirmationStrategy); + if (confirmation.value.err) { + throw new Error( + `Transaction failed: ${confirmation.value.err.toString()}`, + ); + } + } catch (error) { + throw new Error(`Transaction confirmation failed: ${error.message}`); + } +}; + +const 
createGameAddress = () => + findProgramAddress([Buffer.from(GAME_SEED), treasury.publicKey.toBuffer()]); + +const createPlayerAddress = (gameAddress: PublicKey) => + findProgramAddress([ + Buffer.from(PLAYER_SEED), + gameAddress.toBuffer(), + player.publicKey.toBuffer(), + ]); + +const createMonsterAddress = ( + gameAddress: PublicKey, + monsterIndex: anchor.BN, +) => + findProgramAddress([ + Buffer.from(MONSTER_SEED), + gameAddress.toBuffer(), + player.publicKey.toBuffer(), + monsterIndex.toArrayLike(Buffer, "le", MONSTER_INDEX_BYTE_LENGTH), + ]); + +describe("RPG game", () => { + it("creates a new game", async () => {}); + + it("creates a new player", async () => {}); + + it("spawns a monster", async () => {}); + + it("attacks a monster", async () => {}); + + it("deposits action points", async () => {}); }); ``` -Now lets add in the `Create Game` test. Just call `createGame` with eight items, -be sure to pass in all the accounts, and make sure the `treasury` account signs -the transaction. +Now lets add in the `creates a new game` test. Just call `createGame` with eight +items, be sure to pass in all the accounts, and make sure the `treasury` account +signs the transaction. 
```typescript -it("Create Game", async () => { - const [gameKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("GAME"), treasury.publicKey.toBuffer()], - program.programId, - ); - - const txHash = await program.methods - .createGame( - 8, // 8 Items per player - ) - .accounts({ - game: gameKey, - gameMaster: gameMaster.publicKey, - treasury: treasury.publicKey, - systemProgram: anchor.web3.SystemProgram.programId, - }) - .signers([treasury]) - .rpc(); - - await program.provider.connection.confirmTransaction(txHash); - - // Print out if you'd like - // const account = await program.account.game.fetch(gameKey); +it("creates a new game", async () => { + try { + const [gameAddress] = createGameAddress(); + + const createGameSignature = await program.methods + .createGame(MAX_ITEMS_PER_PLAYER) + .accounts({ + game: gameAddress, + gameMaster: gameMaster.publicKey, + treasury: treasury.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([treasury]) + .rpc(); + + await confirmTransaction(createGameSignature, provider); + } catch (error) { + throw new Error(`Failed to create game: ${error.message}`); + } }); ``` @@ -1531,118 +1941,91 @@ anchor test some `.pnp.*` files and no `node_modules`, you may want to call `rm -rf .pnp.*` followed by `npm i` and then `yarn install`. That should work. -Now that everything is running, let's implement the `Create Player`, -`Spawn Monster`, and `Attack Monster` tests. Run each test as you complete them -to make sure things are running smoothly. +Now that everything is running, let's implement the `creates a new player`, +`spawns a monster`, and `attacks a monster` tests. Run each test as you complete +them to make sure things are running smoothly. 
```typescript -it("Create Player", async () => { - const [gameKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("GAME"), treasury.publicKey.toBuffer()], - program.programId, - ); - - const [playerKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("PLAYER"), gameKey.toBuffer(), player.publicKey.toBuffer()], - program.programId, - ); - - const txHash = await program.methods - .createPlayer() - .accounts({ - game: gameKey, - playerAccount: playerKey, - player: player.publicKey, - systemProgram: anchor.web3.SystemProgram.programId, - }) - .rpc(); - - await program.provider.connection.confirmTransaction(txHash); - - // Print out if you'd like - // const account = await program.account.player.fetch(playerKey); +it("creates a new player", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + const createPlayerSignature = await program.methods + .createPlayer() + .accounts({ + game: gameAddress, + playerAccount: playerAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(createPlayerSignature, provider); + } catch (error) { + throw new Error(`Failed to create player: ${error.message}`); + } }); -it("Spawn Monster", async () => { - const [gameKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("GAME"), treasury.publicKey.toBuffer()], - program.programId, - ); - - const [playerKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("PLAYER"), gameKey.toBuffer(), player.publicKey.toBuffer()], - program.programId, - ); +it("spawns a monster", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); - const playerAccount = await program.account.player.fetch(playerKey); + const playerAccount = await program.account.player.fetch(playerAddress); + const [monsterAddress] = createMonsterAddress( + 
gameAddress, + playerAccount.nextMonsterIndex, + ); - const [monsterKey] = anchor.web3.PublicKey.findProgramAddressSync( - [ - Buffer.from("MONSTER"), - gameKey.toBuffer(), - player.publicKey.toBuffer(), - playerAccount.nextMonsterIndex.toBuffer("le", 8), - ], - program.programId, - ); - - const txHash = await program.methods - .spawnMonster() - .accounts({ - game: gameKey, - playerAccount: playerKey, - monster: monsterKey, - player: player.publicKey, - systemProgram: anchor.web3.SystemProgram.programId, - }) - .rpc(); - - await program.provider.connection.confirmTransaction(txHash); - - // Print out if you'd like - // const account = await program.account.monster.fetch(monsterKey); + const spawnMonsterSignature = await program.methods + .spawnMonster() + .accounts({ + game: gameAddress, + playerAccount: playerAddress, + monster: monsterAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(spawnMonsterSignature, provider); + } catch (error) { + throw new Error(`Failed to spawn monster: ${error.message}`); + } }); -it("Attack Monster", async () => { - const [gameKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("GAME"), treasury.publicKey.toBuffer()], - program.programId, - ); +it("attacks a monster", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); - const [playerKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("PLAYER"), gameKey.toBuffer(), player.publicKey.toBuffer()], - program.programId, - ); + const playerAccount = await program.account.player.fetch(playerAddress); + const [monsterAddress] = createMonsterAddress( + gameAddress, + playerAccount.nextMonsterIndex.subn(1), + ); - // Fetch the latest monster created - const playerAccount = await program.account.player.fetch(playerKey); - const [monsterKey] = anchor.web3.PublicKey.findProgramAddressSync( - [ - 
Buffer.from("MONSTER"), - gameKey.toBuffer(), - player.publicKey.toBuffer(), - playerAccount.nextMonsterIndex.subn(1).toBuffer("le", 8), - ], - program.programId, - ); - - const txHash = await program.methods - .attackMonster() - .accounts({ - playerAccount: playerKey, - monster: monsterKey, - player: player.publicKey, - systemProgram: anchor.web3.SystemProgram.programId, - }) - .rpc(); - - await program.provider.connection.confirmTransaction(txHash); - - // Print out if you'd like - // const account = await program.account.monster.fetch(monsterKey); - - const monsterAccount = await program.account.monster.fetch(monsterKey); - assert(monsterAccount.hitpoints.eqn(99)); + const attackMonsterSignature = await program.methods + .attackMonster() + .accounts({ + playerAccount: playerAddress, + monster: monsterAddress, + player: player.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(attackMonsterSignature, provider); + + const monsterAccount = await program.account.monster.fetch(monsterAddress); + assert( + monsterAccount.hitpoints.eqn(INITIAL_MONSTER_HITPOINTS - 1), + "Monster hitpoints should decrease by 1 after attack", + ); + } catch (error) { + throw new Error(`Failed to attack monster: ${error.message}`); + } }); ``` @@ -1650,7 +2033,7 @@ Notice the monster that we choose to attack is `playerAccount.nextMonsterIndex.subn(1).toBuffer('le', 8)`. This allows us to attack the most recent monster spawned. Anything below the `nextMonsterIndex` should be okay. Lastly, since seeds are just an array of bytes we have to turn -the index into the u64, which is little endian `le` at 8 bytes. +the index into the u64, which is a little endian `le` at 8 bytes. Run `anchor test` to deal some damage! @@ -1662,91 +2045,101 @@ game were running continuously, it probably makes sense to use something like [clockwork](https://www.clockwork.xyz/) cron jobs. 
```typescript -it("Deposit Action Points", async () => { - const [gameKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("GAME"), treasury.publicKey.toBuffer()], - program.programId, - ); - - const [playerKey] = anchor.web3.PublicKey.findProgramAddressSync( - [Buffer.from("PLAYER"), gameKey.toBuffer(), player.publicKey.toBuffer()], - program.programId, - ); - - // To show that anyone can deposit the action points - // Ie, give this to a clockwork bot - const clockworkWallet = anchor.web3.Keypair.generate(); - - // To give it a starting balance - const clockworkProvider = new anchor.AnchorProvider( - program.provider.connection, - new NodeWallet(clockworkWallet), - anchor.AnchorProvider.defaultOptions(), - ); - const clockworkProgram = new anchor.Program( - IDL, - program.programId, - clockworkProvider, - ); - - // Have to give the accounts some lamports else the tx will fail - const amountToInitialize = 10000000000; - - const clockworkAirdropTx = - await clockworkProgram.provider.connection.requestAirdrop( - clockworkWallet.publicKey, - amountToInitialize, +it("deposits action points", async () => { + try { + const [gameAddress] = createGameAddress(); + const [playerAddress] = createPlayerAddress(gameAddress); + + // To show that anyone can deposit the action points + // Ie, give this to a clockwork bot + const clockworkWallet = anchor.web3.Keypair.generate(); + + // To give it a starting balance + const clockworkProvider = new anchor.AnchorProvider( + program.provider.connection, + new NodeWallet(clockworkWallet), + anchor.AnchorProvider.defaultOptions(), ); - await program.provider.connection.confirmTransaction( - clockworkAirdropTx, - "confirmed", - ); - const treasuryAirdropTx = - await clockworkProgram.provider.connection.requestAirdrop( + // Have to give the accounts some lamports else the tx will fail + const amountToInitialize = 10000000000; + + const clockworkAirdropTx = + await clockworkProvider.connection.requestAirdrop( + 
clockworkWallet.publicKey, + amountToInitialize, + ); + + await confirmTransaction(clockworkAirdropTx, clockworkProvider); + + const treasuryAirdropTx = await clockworkProvider.connection.requestAirdrop( treasury.publicKey, amountToInitialize, ); - await program.provider.connection.confirmTransaction( - treasuryAirdropTx, - "confirmed", - ); - - const txHash = await clockworkProgram.methods - .depositActionPoints() - .accounts({ - game: gameKey, - player: playerKey, - treasury: treasury.publicKey, - systemProgram: anchor.web3.SystemProgram.programId, - }) - .rpc(); - - await program.provider.connection.confirmTransaction(txHash); - - const expectedActionPoints = 100 + 5 + 1; // Player Create ( 100 ) + Monster Spawn ( 5 ) + Monster Attack ( 1 ) - const treasuryBalance = await program.provider.connection.getBalance( - treasury.publicKey, - ); - assert( - treasuryBalance == amountToInitialize + expectedActionPoints, // Player Create ( 100 ) + Monster Spawn ( 5 ) + Monster Attack ( 1 ) - ); - - const gameAccount = await program.account.game.fetch(gameKey); - assert(gameAccount.actionPointsCollected.eqn(expectedActionPoints)); - - const playerAccount = await program.account.player.fetch(playerKey); - assert(playerAccount.actionPointsSpent.eqn(expectedActionPoints)); - assert(playerAccount.actionPointsToBeCollected.eqn(0)); + + await confirmTransaction(treasuryAirdropTx, clockworkProvider); + + const depositActionPointsSignature = await program.methods + .depositActionPoints() + .accounts({ + game: gameAddress, + player: playerAddress, + treasury: treasury.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .rpc(); + + await confirmTransaction(depositActionPointsSignature, provider); + + const expectedActionPoints = + CREATE_PLAYER_ACTION_POINTS + + SPAWN_MONSTER_ACTION_POINTS + + ATTACK_MONSTER_ACTION_POINTS; + const treasuryBalance = await provider.connection.getBalance( + treasury.publicKey, + ); + assert( + treasuryBalance === AIRDROP_AMOUNT + 
expectedActionPoints, + "Treasury balance should match expected action points", + ); + + const gameAccount = await program.account.game.fetch(gameAddress); + assert( + gameAccount.actionPointsCollected.eqn(expectedActionPoints), + "Game action points collected should match expected", + ); + + const playerAccount = await program.account.player.fetch(playerAddress); + assert( + playerAccount.actionPointsSpent.eqn(expectedActionPoints), + "Player action points spent should match expected", + ); + assert( + playerAccount.actionPointsToBeCollected.eqn(0), + "Player should have no action points to be collected", + ); + } catch (error) { + throw new Error(`Failed to deposit action points: ${error.message}`); + } }); ``` Finally, run `anchor test` to see everything working. +```bash + +RPG game + ✔ creates a new game (317ms) + ✔ creates a new player (399ms) + ✔ spawns a monster (411ms) + ✔ attacks a monster (413ms) + ✔ deposits action points (1232ms) +``` + Congratulations! This was a lot to cover, but you now have a mini RPG game engine. If things aren't quite working, go back through the lab and find where -you went wrong. If you need, you can refer to the -[`main` branch of the solution code](https://github.com/Unboxed-Software/anchor-rpg). +you went wrong. If you need to, you can refer to the +[`main` branch of the solution code](https://github.com/solana-developers/anchor-rpg). Be sure to put these concepts into practice in your own programs. Each little optimization adds up! @@ -1754,17 +2147,17 @@ optimization adds up! ## Challenge Now it's your turn to practice independently. Go back through the lab code -looking for additional optimizations and/or expansion you can make. Think +looking for additional optimizations and/or expansions you can make. Think through new systems and features you would add and how you would optimize them. 
-You can find some example modifications on the `challenge-solution` branch of -the -[RPG repository](https://github.com/Unboxed-Software/anchor-rpg/tree/challenge-solution). +You can find some example modifications on the +[`challenge-solution` branch of the RPG repository](https://github.com/solana-developers/anchor-rpg/tree/challenge-solution). Finally, go through one of your own programs and think about optimizations you can make to improve memory management, storage size, and/or concurrency. + Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=4a628916-91f5-46a9-8eb0-6ba453aa6ca6)! diff --git a/content/courses/program-security/account-data-matching.md b/content/courses/program-security/account-data-matching.md index baff3b5bf..6ecb10f5a 100644 --- a/content/courses/program-security/account-data-matching.md +++ b/content/courses/program-security/account-data-matching.md @@ -12,39 +12,43 @@ description: - Use **data validation checks** to verify that account data matches an expected value. Without appropriate data validation checks, unexpected accounts may be - used in an instruction. + used in an instruction handler. - To implement data validation checks in Rust, simply compare the data stored on an account to an expected value. + ```rust if ctx.accounts.user.key() != ctx.accounts.user_data.user { return Err(ProgramError::InvalidAccountData.into()); } ``` -- In Anchor, you can use `constraint` to checks whether the given expression - evaluates to true. Alternatively, you can use `has_one` to check that a target - account field stored on the account matches the key of an account in the - `Accounts` struct. + +- In Anchor, you can use a + [`constraint`](https://www.anchor-lang.com/docs/account-constraints) to check + whether the given expression evaluates to true. 
Alternatively, you can use + `has_one` to check that a target account field stored on the account matches + the key of an account in the `Accounts` struct. ## Lesson Account data matching refers to data validation checks used to verify the data stored on an account matches an expected value. Data validation checks provide a way to include additional constraints to ensure the appropriate accounts are -passed into an instruction. +passed into an instruction handler. -This can be useful when accounts required by an instruction have dependencies on -values stored in other accounts or if an instruction is dependent on the data -stored in an account. +This can be useful when accounts required by an instruction handler have +dependencies on values stored in other accounts or if an instruction handler is +dependent on the data stored in an account. -#### Missing data validation check +### Missing data validation check -The example below includes an `update_admin` instruction that updates the -`admin` field stored on an `admin_config` account. +The example below includes an `update_admin` instruction handler that updates +the `admin` field stored on an `admin_config` account. -The instruction is missing a data validation check to verify the `admin` account -signing the transaction matches the `admin` stored on the `admin_config` +The instruction handler is missing a data validation check to verify the `admin` +account signing the transaction matches the `admin` stored on the `admin_config` account. This means any account signing the transaction and passed into the -instruction as the `admin` account can update the `admin_config` account. +instruction handler as the `admin` account can update the `admin_config` +account. 
```rust use anchor_lang::prelude::*; @@ -67,7 +71,8 @@ pub struct UpdateAdmin<'info> { pub admin_config: Account<'info, AdminConfig>, #[account(mut)] pub admin: Signer<'info>, - pub new_admin: SystemAccount<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, } #[account] @@ -76,7 +81,7 @@ pub struct AdminConfig { } ``` -#### Add data validation check +### Add Data Validation Check The basic Rust approach to solve this problem is to simply compare the passed in `admin` key to the `admin` key stored in the `admin_config` account, throwing an @@ -88,9 +93,9 @@ if ctx.accounts.admin.key() != ctx.accounts.admin_config.admin { } ``` -By adding a data validation check, the `update_admin` instruction would only -process if the `admin` signer of the transaction matched the `admin` stored on -the `admin_config` account. +By adding a data validation check, the `update_admin` instruction handler would +only process if the `admin` signer of the transaction matched the `admin` stored +on the `admin_config` account. ```rust use anchor_lang::prelude::*; @@ -116,7 +121,8 @@ pub struct UpdateAdmin<'info> { pub admin_config: Account<'info, AdminConfig>, #[account(mut)] pub admin: Signer<'info>, - pub new_admin: SystemAccount<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, } #[account] @@ -125,11 +131,11 @@ pub struct AdminConfig { } ``` -#### Use Anchor constraints +### Use Anchor Constraints Anchor simplifies this with the `has_one` constraint. You can use the `has_one` -constraint to move the data validation check from the instruction logic to the -`UpdateAdmin` struct. +constraint to move the data validation check from the instruction handler logic +to the `UpdateAdmin` struct. 
In the example below, `has_one = admin` specifies that the `admin` account signing the transaction must match the `admin` field stored on the @@ -161,7 +167,8 @@ pub struct UpdateAdmin<'info> { pub admin_config: Account<'info, AdminConfig>, #[account(mut)] pub admin: Signer<'info>, - pub new_admin: SystemAccount<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, } #[account] @@ -185,46 +192,51 @@ pub struct UpdateAdmin<'info> { pub admin_config: Account<'info, AdminConfig>, #[account(mut)] pub admin: Signer<'info>, - pub new_admin: SystemAccount<'info>, + /// CHECK: This account will not be checked by anchor + pub new_admin: UncheckedAccount<'info>, } ``` ## Lab -For this lab we'll create a simple “vault” program similar to the program we +For this lab, we'll create a simple “vault” program similar to the program we used in the Signer Authorization lesson and the Owner Check lesson. Similar to those labs, we'll show in this lab how a missing data validation check could allow the vault to be drained. -#### 1. Starter +### 1. Starter -To get started, download the starter code from the `starter` branch of -[this repository](https://github.com/Unboxed-Software/solana-account-data-matching). +To get started, download the starter code from the +[`starter` branch of this repository](https://github.com/solana-developers/account-data-matching/tree/starter). The starter code includes a program with two instructions and the boilerplate setup for the test file. -The `initialize_vault` instruction initializes a new `Vault` account and a new -`TokenAccount`. The `Vault` account will store the address of a token account, -the authority of the vault, and a withdraw destination token account. +The `initialize_vault` instruction handler initializes a new `Vault` account and +a new `TokenAccount`. The `Vault` account will store the address of a token +account, the authority of the vault, and a withdraw destination token account. 
The authority of the new token account will be set as the `vault`, a PDA of the program. This allows the `vault` account to sign for the transfer of tokens from the token account. -The `insecure_withdraw` instruction transfers all the tokens in the `vault` -account's token account to a `withdraw_destination` token account. +The `insecure_withdraw` instruction handler transfers all the tokens in the +`vault` account's token account to a `withdraw_destination` token account. -Notice that this instruction \***\*does\*\*** have a signer check for + + +Notice that this instruction handler \***\*does\*\*** have a signer check for `authority` and an owner check for `vault`. However, nowhere in the account -validation or instruction logic is there code that checks that the `authority` -account passed into the instruction matches the `authority` account on the -`vault`. +validation or instruction handler logic is there code that checks that the +`authority` account passed into the instruction handler matches the `authority` +account on the `vault`. 
```rust use anchor_lang::prelude::*; use anchor_spl::token::{self, Mint, Token, TokenAccount}; -declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); +declare_id!("J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu"); + +pub const DISCRIMINATOR_SIZE: usize = 8; #[program] pub mod account_data_matching { @@ -240,7 +252,7 @@ pub mod account_data_matching { pub fn insecure_withdraw(ctx: Context) -> Result<()> { let amount = ctx.accounts.token_account.amount; - let seeds = &[b"vault".as_ref(), &[*ctx.bumps.get("vault").unwrap()]]; + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; let signer = [&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( @@ -263,7 +275,7 @@ pub struct InitializeVault<'info> { #[account( init, payer = authority, - space = 8 + 32 + 32 + 32, + space = DISCRIMINATOR_SIZE + Vault::INIT_SPACE, seeds = [b"vault"], bump, )] @@ -306,6 +318,7 @@ pub struct InsecureWithdraw<'info> { } #[account] +#[derive(Default, InitSpace)] pub struct Vault { token_account: Pubkey, authority: Pubkey, @@ -313,64 +326,73 @@ pub struct Vault { } ``` -#### 2. Test `insecure_withdraw` instruction +### 2. Test insecure_withdraw Instruction Handler To prove that this is a problem, let's write a test where an account other than the vault's `authority` tries to withdraw from the vault. The test file includes the code to invoke the `initialize_vault` instruction -using the provider wallet as the `authority` and then mints 100 tokens to the -`vault` token account. +handler using the provider wallet as the `authority` and then mints 100 tokens +to the `vault` token account. -Add a test to invoke the `insecure_withdraw` instruction. Use -`withdrawDestinationFake` as the `withdrawDestination` account and `walletFake` -as the `authority`. Then send the transaction using `walletFake`. +Add a test to invoke the `insecure_withdraw` instruction handler. Use +`fakeWithdrawDestination` as the `withdrawDestination` account and `fakeWallet` +as the `authority`. 
Then send the transaction using `fakeWallet`.

Since there are no checks to verify the `authority` account passed into the
-instruction matches the values stored on the `vault` account initialized in the
-first test, the instruction will process successfully and the tokens will be
-transferred to the `withdrawDestinationFake` account.
+instruction handler matches the values stored on the `vault` account initialized
+in the first test, the instruction handler will process successfully and the
+tokens will be transferred to the `fakeWithdrawDestination` account.

```typescript
-describe("account-data-matching", () => {
+describe("Account Data Matching", () => {
  ...
-  it("Insecure withdraw", async () => {
-    const tx = await program.methods
-      .insecureWithdraw()
-      .accounts({
-        vault: vaultPDA,
-        tokenAccount: tokenPDA,
-        withdrawDestination: withdrawDestinationFake,
-        authority: walletFake.publicKey,
-      })
-      .transaction()
-
-    await anchor.web3.sendAndConfirmTransaction(connection, tx, [walletFake])
-
-    const balance = await connection.getTokenAccountBalance(tokenPDA)
-    expect(balance.value.uiAmount).to.eq(0)
-  })
+  it("allows insecure withdrawal", async () => {
+    try {
+      const tx = await program.methods
+        .insecureWithdraw()
+        .accounts({
+          vault: vaultPDA,
+          tokenAccount: tokenPDA,
+          withdrawDestination: fakeWithdrawDestination,
+          authority: fakeWallet.publicKey,
+        })
+        .transaction();
+
+      await anchor.web3.sendAndConfirmTransaction(provider.connection, tx, [
+        fakeWallet,
+      ]);
+
+      const tokenAccount = await getAccount(provider.connection, tokenPDA);
+      expect(Number(tokenAccount.amount)).to.equal(0);
+    } catch (error) {
+      throw new Error(
+        `Insecure withdraw failed unexpectedly: ${error.message}`,
+      );
+    }
+  });
 })
```

Run `anchor test` to see that both transactions will complete successfully. 
```bash -account-data-matching - ✔ Initialize Vault (811ms) - ✔ Insecure withdraw (403ms) +Account Data Matching + ✔ initializes the vault and mints tokens (879ms) + ✔ allows insecure withdrawal (431ms) ``` -#### 3. Add `secure_withdraw` instruction +### 3. Add secure_withdraw Instruction Handler -Let's go implement a secure version of this instruction called +Let's go implement a secure version of this instruction handler called `secure_withdraw`. -This instruction will be identical to the `insecure_withdraw` instruction, -except we'll use the `has_one` constraint in the account validation struct -(`SecureWithdraw`) to check that the `authority` account passed into the -instruction matches the `authority` account on the `vault` account. That way -only the correct authority account can withdraw the vault's tokens. +This instruction handler will be identical to the `insecure_withdraw` +instruction handler, except we'll use the `has_one` constraint in the account +validation struct (`SecureWithdraw`) to check that the `authority` account +passed into the instruction handler matches the `authority` account on the +`vault` account. That way only the correct authority account can withdraw the +vault's tokens. 
```rust use anchor_lang::prelude::*; @@ -378,6 +400,8 @@ use anchor_spl::token::{self, Mint, Token, TokenAccount}; declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); +pub const DISCRIMINATOR_SIZE: usize = 8; + #[program] pub mod account_data_matching { use super::*; @@ -385,7 +409,7 @@ pub mod account_data_matching { pub fn secure_withdraw(ctx: Context) -> Result<()> { let amount = ctx.accounts.token_account.amount; - let seeds = &[b"vault".as_ref(), &[*ctx.bumps.get("vault").unwrap()]]; + let seeds = &[b"vault".as_ref(), &[ctx.bumps.vault]]; let signer = [&seeds[..]]; let cpi_ctx = CpiContext::new_with_signer( @@ -411,7 +435,6 @@ pub struct SecureWithdraw<'info> { has_one = token_account, has_one = authority, has_one = withdraw_destination, - )] pub vault: Account<'info, Vault>, #[account( @@ -427,94 +450,104 @@ pub struct SecureWithdraw<'info> { } ``` -#### 4. Test `secure_withdraw` instruction +### 4. Test secure_withdraw Instruction Handler -Now let's test the `secure_withdraw` instruction with two tests: one that uses -`walletFake` as the authority and one that uses `wallet` as the authority. We -expect the first invocation to return an error and the second to succeed. +Now let's test the `secure_withdraw` instruction handler with two tests: one +that uses `fakeWallet` as the authority and one that uses `wallet` as the +authority. We expect the first invocation to return an error and the second to +succeed. ```typescript describe("account-data-matching", () => { ... 
- it("Secure withdraw, expect error", async () => { + it("prevents unauthorized secure withdrawal", async () => { try { const tx = await program.methods .secureWithdraw() .accounts({ vault: vaultPDA, tokenAccount: tokenPDA, - withdrawDestination: withdrawDestinationFake, - authority: walletFake.publicKey, + withdrawDestination: fakeWithdrawDestination, + authority: fakeWallet.publicKey, }) - .transaction() + .transaction(); - await anchor.web3.sendAndConfirmTransaction(connection, tx, [walletFake]) - } catch (err) { - expect(err) - console.log(err) + await anchor.web3.sendAndConfirmTransaction(provider.connection, tx, [ + fakeWallet, + ]); + + throw new Error("Secure withdraw should have failed but didn't"); + } catch (error) { + expect(error).to.be.an("error"); + console.log("Expected error occurred:", error.message); } - }) - - it("Secure withdraw", async () => { - await spl.mintTo( - connection, - wallet.payer, - mint, - tokenPDA, - wallet.payer, - 100 - ) - - await program.methods - .secureWithdraw() - .accounts({ - vault: vaultPDA, - tokenAccount: tokenPDA, - withdrawDestination: withdrawDestination, - authority: wallet.publicKey, - }) - .rpc() - - const balance = await connection.getTokenAccountBalance(tokenPDA) - expect(balance.value.uiAmount).to.eq(0) - }) + }); + + it("allows secure withdrawal by authorized user", async () => { + try { + await new Promise((resolve) => setTimeout(resolve, 1000)); + + await mintTo( + provider.connection, + wallet.payer, + mint, + tokenPDA, + wallet.payer, + 100, + ); + + await program.methods + .secureWithdraw() + .accounts({ + vault: vaultPDA, + tokenAccount: tokenPDA, + withdrawDestination, + authority: wallet.publicKey, + }) + .rpc(); + + const tokenAccount = await getAccount(provider.connection, tokenPDA); + expect(Number(tokenAccount.amount)).to.equal(0); + } catch (error) { + throw new Error(`Secure withdraw failed unexpectedly: ${error.message}`); + } + }); }) ``` Run `anchor test` to see that the transaction using an 
incorrect authority
-account will now return an Anchor Error while the transaction using correct
-accounts completes successfully.
+account will now return an Anchor Error while the transaction using the correct
+accounts completes successfully.

```bash
-'Program Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS invoke [1]',
-'Program log: Instruction: SecureWithdraw',
-'Program log: AnchorError caused by account: vault. Error Code: ConstraintHasOne. Error Number: 2001. Error Message: A has one constraint was violated.',
-'Program log: Left:',
-'Program log: DfLZV18rD7wCQwjYvhTFwuvLh49WSbXFeJFPQb5czifH',
-'Program log: Right:',
-'Program log: 5ovvmG5ntwUC7uhNWfirjBHbZD96fwuXDMGXiyMwPg87',
-'Program Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS consumed 10401 of 200000 compute units',
-'Program Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS failed: custom program error: 0x7d1'
+"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu invoke [1]",
+"Program log: Instruction: SecureWithdraw",
+"Program log: AnchorError caused by account: vault. Error Code: ConstraintHasOne. Error Number: 2001. Error Message: A has one constraint was violated.",
+"Program log: Left:",
+"Program log: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM",
+"Program log: Right:",
+"Program log: 2jTDDwaPzbpG2oFnnqtuHJpiS9k9dDVqzzfA2ofcqfFS",
+"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu consumed 11790 of 200000 compute units",
+"Program J89xWAprDsLAAwcTA6AhrK49UMSAYJJWdXvw4ZQK4suu failed: custom program error: 0x7d1"
```

Note that Anchor specifies in the logs the account that causes the error
(`AnchorError caused by account: vault`).

```bash
-✔ Secure withdraw, expect error (77ms)
-✔ Secure withdraw (10073ms)
+✔ prevents unauthorized secure withdrawal
+✔ allows secure withdrawal by authorized user (1713ms)
```

And just like that, you've closed up the security loophole. The theme across
most of these potential exploits is that they're quite simple. 
However, as your -programs grow in scope and complexity, it becomse increasingly easy to miss +programs grow in scope and complexity, it becomes increasingly easy to miss possible exploits. It's great to get in a habit of writing tests that send instructions that _shouldn't_ work. The more the better. That way you catch problems before you deploy. If you want to take a look at the final solution code you can find it on the -`solution` branch of -[the repository](https://github.com/Unboxed-Software/solana-account-data-matching/tree/solution). +[`solution` branch of the repository](https://github.com/solana-developers/account-data-matching/tree/solution). ## Challenge @@ -528,6 +561,7 @@ Remember, if you find a bug or exploit in somebody else's program, please alert them! If you find one in your own program, be sure to patch it right away. + Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=a107787e-ad33-42bb-96b3-0592efc1b92f)! diff --git a/content/courses/program-security/bump-seed-canonicalization.md b/content/courses/program-security/bump-seed-canonicalization.md index e2ea4d93f..1ee208d63 100644 --- a/content/courses/program-security/bump-seed-canonicalization.md +++ b/content/courses/program-security/bump-seed-canonicalization.md @@ -8,41 +8,53 @@ objectives: - Use Anchor's `seeds` and `bump` constraints to ensure the canonical bump is always used in future instructions when deriving a PDA description: - "Understand the need for consistent PDA calculation by storing and reusuing - the canonical bump." + "Understand the need for consistent PDA calculation by storing and reusing the + canonical bump." --- ## Summary - The [**`create_program_address`**](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.create_program_address) - function derives a PDA without searching for the **canonical bump**. 
This - means there are multiple valid bumps, all of which will produce different - addresses. + function derives a PDA but does so without searching for the canonical bump. + It allows multiple valid bumps to produce different addresses. While this can + still generate a valid PDA, it lacks determinism, as multiple bumps may yield + different addresses for the same set of seeds. - Using [**`find_program_address`**](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.find_program_address) - ensures that the highest valid bump, or canonical bump, is used for the - derivation, thus creating a deterministic way to find an address given - specific seeds. -- Upon initialization, you can use Anchor's `seeds` and `bump` constraint to - ensure that PDA derivations in the account validation struct always use the - canonical bump -- Anchor allows you to **specify a bump** with the `bump = ` - constraint when verifying the address of a PDA -- Because `find_program_address` can be expensive, best practice is to store the - derived bump in an account's data field to be referenced later on when - re-deriving the address for verification + ensures that the **highest valid bump**, often referred to as the **canonical + bump**, is used in the PDA derivation. This provides a deterministic way to + compute an address for a given set of seeds, ensuring consistency across the + program. +- In Anchor, you can specify the `seeds` and the `bump` to ensure that PDA + derivations in your account validation struct always align with the correct + canonical bump. +- Anchor also allows you to specify a bump directly in the validation struct + using the `bump = ` constraint. This ensures that the correct bump + is used when verifying the PDA. +- Using `find_program_address` can be computationally expensive due to the + process of searching for the highest valid bump. 
It's considered best practice + to store the derived bump in an account's data field upon initialization. This + allows the bump to be referenced in subsequent instruction handlers, avoiding + the need to repeatedly call `find_program_address` to re-derive the PDA. + ```rust #[derive(Accounts)] pub struct VerifyAddress<'info> { - #[account( - seeds = [DATA_PDA_SEED.as_bytes()], - bump = data.bump - )] - data: Account<'info, Data>, + #[account( + seeds = [DATA_PDA_SEED.as_bytes()], + bump = data.bump + )] + data: Account<'info, Data>, } ``` +- In summary, while `create_program_address` can generate a PDA, + `find_program_address` ensures consistency and reliability by always producing + the canonical bump, which is critical for deterministic program execution. + This helps maintain integrity in onchain apps, especially when validating PDAs + across multiple instruction handlers. + ## Lesson Bump seeds are a number between 0 and 255, inclusive, used to ensure that an @@ -52,37 +64,39 @@ is a valid PDA. The **canonical bump** is the highest bump value that produces a valid PDA. The standard in Solana is to _always use the canonical bump_ when deriving PDAs, both for security and convenience. -### Insecure PDA derivation using `create_program_address` +### Insecure PDA Derivation using create_program_address Given a set of seeds, the `create_program_address` function will produce a valid PDA about 50% of the time. The bump seed is an additional byte added as a seed -to "bump" the derived address into valid territory. Since there are 256 possible -bump seeds and the function produces valid PDAs approximately 50% of the time, -there are many valid bumps for a given set of input seeds. +to "bump" the derived address into a valid territory. Since there are 256 +possible bump seeds and the function produces valid PDAs approximately 50% of +the time, there are many valid bumps for a given set of input seeds. 
-You can imagine that this could cause confusion for locating accounts when using +You can imagine that this could cause confusion in locating accounts when using seeds as a way of mapping between known pieces of information to accounts. Using the canonical bump as the standard ensures that you can always find the right account. More importantly, it avoids security exploits caused by the open-ended nature of allowing multiple bumps. -In the example below, the `set_value` instruction uses a `bump` that was passed -in as instruction data to derive a PDA. The instruction then derives the PDA -using `create_program_address` function and checks that the `address` matches -the public key of the `data` account. +In the example below, the `set_value` instruction handler uses a `bump` that was +passed in as instruction data to derive a PDA. The instruction handler then +derives the PDA using the `create_program_address` function and checks that the +`address` matches the public key of the `data` account.
```rust use anchor_lang::prelude::*; -declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); +declare_id!("ABQaKhtpYQUUgZ9m2sAY7ZHxWv6KyNdhUJW8Dh8NQbkf"); #[program] pub mod bump_seed_canonicalization_insecure { use super::*; + // Insecure PDA Derivation using create_program_address pub fn set_value(ctx: Context, key: u64, new_value: u64, bump: u8) -> Result<()> { let address = - Pubkey::create_program_address(&[key.to_le_bytes().as_ref(), &[bump]], ctx.program_id).unwrap(); + Pubkey::create_program_address(&[key.to_le_bytes().as_ref(), &[bump]], ctx.program_id) + .unwrap(); if address != ctx.accounts.data.key() { return Err(ProgramError::InvalidArgument.into()); } @@ -95,33 +109,34 @@ pub mod bump_seed_canonicalization_insecure { #[derive(Accounts)] pub struct BumpSeed<'info> { - data: Account<'info, Data>, + #[account(mut)] + pub data: Account<'info, Data>, } #[account] pub struct Data { - value: u64, + pub value: u64, } ``` -While the instruction derives the PDA and checks the passed-in account, which is -good, it allows the caller to pass in an arbitrary bump. Depending on the -context of your program, this could result in undesired behavior or potential -exploit. +While the instruction handler derives the PDA and checks the passed-in account, +which is good, it allows the caller to pass in an arbitrary bump. Depending on +the context of your program, this could result in undesired behavior or +potential exploit. If the seed mapping was meant to enforce a one-to-one relationship between PDA and user, for example, this program would not properly enforce that. A user could call the program multiple times with many valid bumps, each producing a different PDA. -### Recommended derivation using `find_program_address` +### Recommended Derivation using find_program_address A simple way around this problem is to have the program expect only the canonical bump and use `find_program_address` to derive the PDA. 
The [`find_program_address`](https://docs.rs/solana-program/latest/solana_program/pubkey/struct.Pubkey.html#method.find_program_address) -_always uses the canonical bump_. This function iterates through calling +_always uses the canonical bump_. This function iterates by calling `create_program_address`, starting with a bump of 255 and decrementing the bump by one with each iteration. As soon as a valid address is found, the function returns both the derived PDA and the canonical bump used to derive it. @@ -151,7 +166,7 @@ pub fn set_value_secure( } ``` -### Use Anchor's `seeds` and `bump` constraints +### Use Anchor's seeds and bump Constraints Anchor provides a convenient way to derive PDAs in the account validation struct using the `seeds` and `bump` constraints. These can even be combined with the @@ -166,6 +181,8 @@ use anchor_lang::prelude::*; declare_id!("Fg6PaFpoGXkYsidMpWTK6W2BeZ7FEfcYkg476zPFsLnS"); +pub const DISCRIMINATOR_SIZE: usize = 8; + #[program] pub mod bump_seed_canonicalization_recommended { use super::*; @@ -175,28 +192,29 @@ pub mod bump_seed_canonicalization_recommended { Ok(()) } } - -// initialize account at PDA +// Initialize account at PDA #[derive(Accounts)] #[instruction(key: u64)] pub struct BumpSeed<'info> { - #[account(mut)] - payer: Signer<'info>, - #[account( - init, - seeds = [key.to_le_bytes().as_ref()], - // derives the PDA using the canonical bump - bump, - payer = payer, - space = 8 + 8 - )] - data: Account<'info, Data>, - system_program: Program<'info, System> + #[account(mut)] + pub payer: Signer<'info>, + #[account( + init, + seeds = [key.to_le_bytes().as_ref()], + // Derives the PDA using the canonical bump + bump, + payer = payer, + space = DISCRIMINATOR_SIZE + Data::INIT_SPACE + )] + pub data: Account<'info, Data>, + + pub system_program: Program<'info, System>, } #[account] +#[derive(InitSpace)] pub struct Data { - value: u64, + pub value: u64, } ``` @@ -210,7 +228,7 @@ arbitrary bumps, but rather to let you optimize 
your program. The iterative nature of `find_program_address` makes it expensive, so best practice is to store the canonical bump in the PDA account's data upon initializing a PDA, allowing you to reference the bump stored when validating the PDA in subsequent -instructions. +instruction handlers. When you specify the bump to use, Anchor uses `create_program_address` with the provided bump instead of `find_program_address`. This pattern of storing the @@ -222,66 +240,76 @@ use anchor_lang::prelude::*; declare_id!("CVwV9RoebTbmzsGg1uqU1s4a3LvTKseewZKmaNLSxTqc"); +// Constant for account space calculation +pub const DISCRIMINATOR_SIZE: usize = 8; + #[program] pub mod bump_seed_canonicalization_recommended { use super::*; + // Instruction handler to set a value and store the bump pub fn set_value(ctx: Context, _key: u64, new_value: u64) -> Result<()> { ctx.accounts.data.value = new_value; - // store the bump on the account - ctx.accounts.data.bump = *ctx.bumps.get("data").unwrap(); + + // Store the canonical bump on the account + // This bump is automatically derived by Anchor + ctx.accounts.data.bump = ctx.bumps.data; + Ok(()) } + // Instruction handler to verify the PDA address pub fn verify_address(ctx: Context, _key: u64) -> Result<()> { msg!("PDA confirmed to be derived with canonical bump: {}", ctx.accounts.data.key()); Ok(()) } } -// initialize account at PDA +// Account validation struct for initializing the PDA account #[derive(Accounts)] #[instruction(key: u64)] pub struct BumpSeed<'info> { - #[account(mut)] - payer: Signer<'info>, - #[account( - init, - seeds = [key.to_le_bytes().as_ref()], - // derives the PDA using the canonical bump - bump, - payer = payer, - space = 8 + 8 + 1 - )] - data: Account<'info, Data>, - system_program: Program<'info, System> + #[account(mut)] + pub payer: Signer<'info>, + + #[account( + init, + seeds = [key.to_le_bytes().as_ref()], + bump, // Anchor automatically uses the canonical bump + payer = payer, + space = 
DISCRIMINATOR_SIZE + Data::INIT_SPACE + )] + pub data: Account<'info, Data>, + + pub system_program: Program<'info, System> } +// Account validation struct for verifying the PDA address #[derive(Accounts)] #[instruction(key: u64)] pub struct VerifyAddress<'info> { - #[account( - seeds = [key.to_le_bytes().as_ref()], - // guranteed to be the canonical bump every time - bump = data.bump - )] - data: Account<'info, Data>, + #[account( + seeds = [key.to_le_bytes().as_ref()], + bump = data.bump // Use the stored bump, guaranteed to be canonical + )] + pub data: Account<'info, Data>, } +// Data structure for the PDA account #[account] +#[derive(InitSpace)] pub struct Data { - value: u64, - // bump field - bump: u8 + pub value: u64, + pub bump: u8 // Stores the canonical bump } ``` If you don't specify the bump on the `bump` constraint, Anchor will still use `find_program_address` to derive the PDA using the canonical bump. As a -consequence, your instruction will incur a variable amount of compute budget. -Programs that are already at risk of exceeding their compute budget should use -this with care since there is a chance that the program's budget may be -occasionally and unpredictably exceeded. +consequence, your instruction handler will incur a variable amount of compute +budget. Programs that are already at risk of exceeding their compute budget +should use this with care since there is a chance that the program's budget may +be occasionally and unpredictably exceeded. On the other hand, if you only need to verify the address of a PDA passed in without initializing an account, you'll be forced to either let Anchor derive @@ -294,35 +322,35 @@ To demonstrate the security exploits possible when you don't check for the canonical bump, let's work with a program that lets each program user "claim" rewards on time. -#### 1. Setup +### 1. 
Setup -Start by getting the code on the `starter` branch of -[this repository](https://github.com/Unboxed-Software/solana-bump-seed-canonicalization/tree/starter). +Start by getting the code on the +[`starter` branch of this repository](https://github.com/solana-developers/bump-seed-canonicalization/tree/starter). -Notice that there are two instructions on the program and a single test in the -`tests` directory. +Notice that there are two instruction handlers on the program and a single test +in the `tests` directory. -The instructions on the program are: +The instruction handlers on the program are: 1. `create_user_insecure` 2. `claim_insecure` -The `create_user_insecure` instruction simply creates a new account at a PDA -derived using the signer's public key and a passed-in bump. +The `create_user_insecure` instruction handler simply creates a new account at a +PDA derived using the signer's public key and a passed-in bump. -The `claim_insecure` instruction mints 10 tokens to the user and then marks the -account's rewards as claimed so that they can't claim again. +The `claim_insecure` instruction handler mints 10 tokens to the user and then +marks the account's rewards as claimed so that they can't claim again. However, the program doesn't explicitly check that the PDAs in question are using the canonical bump. Have a look at the program to understand what it does before proceeding. -#### 2. Test insecure instructions +### 2. Test Insecure Instruction Handlers -Since the instructions don't explicitly require the `user` PDA to use the -canonical bump, an attacker can create multiple accounts per wallet and claim -more rewards than should be allowed. +Since the instruction handlers don't explicitly require the `user` PDA to use +the canonical bump, an attacker can create multiple accounts per wallet and +claim more rewards than should be allowed. The test in the `tests` directory creates a new keypair called `attacker` to represent an attacker. 
It then loops through all possible bumps and calls @@ -331,156 +359,188 @@ the attacker has been able to claim rewards multiple times and has earned more than the 10 tokens allotted per user. ```typescript -it("Attacker can claim more than reward limit with insecure instructions", async () => { - const attacker = Keypair.generate(); - await safeAirdrop(attacker.publicKey, provider.connection); - const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); - - let numClaims = 0; - - for (let i = 0; i < 256; i++) { - try { - const pda = createProgramAddressSync( - [attacker.publicKey.toBuffer(), Buffer.from([i])], - program.programId, - ); - await program.methods - .createUserInsecure(i) - .accounts({ - user: pda, - payer: attacker.publicKey, - }) - .signers([attacker]) - .rpc(); - await program.methods - .claimInsecure(i) - .accounts({ - user: pda, - mint, - payer: attacker.publicKey, - userAta: ataKey, - }) - .signers([attacker]) - .rpc(); - - numClaims += 1; - } catch (error) { - if (error.message !== "Invalid seeds, address must fall off the curve") { - console.log(error); +it("allows attacker to claim more than reward limit with insecure instruction handlers", async () => { + try { + const attacker = Keypair.generate(); + await airdropIfRequired( + connection, + attacker.publicKey, + 1 * LAMPORTS_PER_SOL, + 0.5 * LAMPORTS_PER_SOL, + ); + const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); + + let successfulClaimCount = 0; + + for (let i = 0; i < 256; i++) { + try { + const pda = anchor.web3.PublicKey.createProgramAddressSync( + [attacker.publicKey.toBuffer(), Buffer.from([i])], + program.programId, + ); + await program.methods + .createUserInsecure(i) + .accounts({ + user: pda, + payer: attacker.publicKey, + }) + .signers([attacker]) + .rpc(); + await program.methods + .claimInsecure(i) + .accounts({ + user: pda, + mint, + payer: attacker.publicKey, + userAta: ataKey, + mintAuthority, + tokenProgram: 
anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + successfulClaimCount += 1; + } catch (error) { + if ( + error instanceof Error && + !error.message.includes( + "Invalid seeds, address must fall off the curve", + ) + ) { + console.error(error); + } } } - } - const ata = await getAccount(provider.connection, ataKey); + const ata = await getAccount(connection, ataKey); - console.log( - `Attacker claimed ${numClaims} times and got ${Number(ata.amount)} tokens`, - ); + console.log( + `Attacker claimed ${successfulClaimCount} times and got ${Number( + ata.amount, + )} tokens`, + ); - expect(numClaims).to.be.greaterThan(1); - expect(Number(ata.amount)).to.be.greaterThan(10); + expect(successfulClaimCount).to.be.greaterThan(1); + expect(Number(ata.amount)).to.be.greaterThan(10); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } }); ``` Run `anchor test` to see that this test passes, showing that the attacker is -successful. Since the test calles the instructions for every valid bump, it -takes a bit to run, so be patient. +successful. Since the test calls the instruction handlers for every valid bump, +it takes a bit to run, so be patient. ```bash - bump-seed-canonicalization -Attacker claimed 129 times and got 1290 tokens - ✔ Attacker can claim more than reward limit with insecure instructions (133840ms) + Bump seed canonicalization +Attacker claimed 121 times and got 1210 tokens + ✔ allows attacker to claim more than reward limit with insecure instructions (119994ms) ``` -#### 3. Create secure instructions +### 3. Create Secure Instruction Handler -Let's demonstrate patching the vulnerability by creating two new instructions: +Let's demonstrate patching the vulnerability by creating two new instruction +handlers: 1. `create_user_secure` 2. 
`claim_secure` -Before we write the account validation or instruction logic, let's create a new -user type, `UserSecure`. This new type will add the canonical bump as a field on -the struct. +Before we write the account validation or instruction handler logic, let's +create a new user type, `UserSecure`. This new type will add the canonical bump +as a field on the struct. ```rust +// Secure user account structure #[account] +#[derive(InitSpace)] pub struct UserSecure { - auth: Pubkey, - bump: u8, - rewards_claimed: bool, + pub auth: Pubkey, + pub bump: u8, + pub rewards_claimed: bool, } ``` -Next, let's create account validation structs for each of the new instructions. -They'll be very similar to the insecure versions but will let Anchor handle the -derivation and deserialization of the PDAs. +Next, let's create account validation structs for each of the new instruction +handlers. They'll be very similar to the insecure versions but will let Anchor +handle the derivation and deserialization of the PDAs. 
```rust +// Account validation struct for securely creating a user account #[derive(Accounts)] pub struct CreateUserSecure<'info> { #[account(mut)] - payer: Signer<'info>, + pub payer: Signer<'info>, #[account( init, - seeds = [payer.key().as_ref()], - // derives the PDA using the canonical bump - bump, payer = payer, - space = 8 + 32 + 1 + 1 + space = DISCRIMINATOR_SIZE + UserSecure::INIT_SPACE, + seeds = [payer.key().as_ref()], + bump )] - user: Account<'info, UserSecure>, - system_program: Program<'info, System>, + pub user: Account<'info, UserSecure>, + pub system_program: Program<'info, System>, } +// Account validation struct for secure claiming of rewards #[derive(Accounts)] pub struct SecureClaim<'info> { #[account( + mut, seeds = [payer.key().as_ref()], bump = user.bump, constraint = !user.rewards_claimed @ ClaimError::AlreadyClaimed, constraint = user.auth == payer.key() )] - user: Account<'info, UserSecure>, + pub user: Account<'info, UserSecure>, #[account(mut)] - payer: Signer<'info>, + pub payer: Signer<'info>, #[account( init_if_needed, payer = payer, associated_token::mint = mint, associated_token::authority = payer )] - user_ata: Account<'info, TokenAccount>, + pub user_ata: Account<'info, TokenAccount>, #[account(mut)] - mint: Account<'info, Mint>, - /// CHECK: mint auth PDA - #[account(seeds = ["mint".as_bytes().as_ref()], bump)] + pub mint: Account<'info, Mint>, + /// CHECK: This is the mint authority PDA, checked by seeds constraint + #[account(seeds = [b"mint"], bump)] pub mint_authority: UncheckedAccount<'info>, - token_program: Program<'info, Token>, - associated_token_program: Program<'info, AssociatedToken>, - system_program: Program<'info, System>, - rent: Sysvar<'info, Rent>, + pub token_program: Program<'info, Token>, + pub associated_token_program: Program<'info, AssociatedToken>, + pub system_program: Program<'info, System>, + pub rent: Sysvar<'info, Rent>, } ``` -Finally, let's implement the instruction logic for the two new 
instructions. The -`create_user_secure` instruction simply needs to set the `auth`, `bump` and -`rewards_claimed` fields on the `user` account data. +Finally, let's implement the instruction handler logic for the two new +instruction handlers. The `create_user_secure` instruction handler simply needs +to set the `auth`, `bump` and `rewards_claimed` fields on the `user` account +data. ```rust +// Secure instruction to create a user account pub fn create_user_secure(ctx: Context) -> Result<()> { - ctx.accounts.user.auth = ctx.accounts.payer.key(); - ctx.accounts.user.bump = *ctx.bumps.get("user").unwrap(); - ctx.accounts.user.rewards_claimed = false; + ctx.accounts.user.set_inner(UserSecure { + auth: ctx.accounts.payer.key(), + bump: ctx.bumps.user, + rewards_claimed: false, + }); Ok(()) } ``` -The `claim_secure` instruction needs to mint 10 tokens to the user and set the -`user` account's `rewards_claimed` field to `true`. +The `claim_secure` instruction handler needs to mint 10 tokens to the user and +set the `user` account's `rewards_claimed` field to `true`. ```rust +// Secure instruction to claim rewards pub fn claim_secure(ctx: Context) -> Result<()> { + // Mint tokens to the user's associated token account token::mint_to( CpiContext::new_with_signer( ctx.accounts.token_program.to_account_info(), @@ -489,104 +549,142 @@ pub fn claim_secure(ctx: Context) -> Result<()> { to: ctx.accounts.user_ata.to_account_info(), authority: ctx.accounts.mint_authority.to_account_info(), }, - &[&[ - b"mint".as_ref(), - &[*ctx.bumps.get("mint_authority").unwrap()], - ]], + &[&[b"mint", &[ctx.bumps.mint_authority]]], ), 10, )?; + // Mark rewards as claimed ctx.accounts.user.rewards_claimed = true; Ok(()) } ``` -#### 4. Test secure instructions +### 4. Test Secure Instruction Handlers Let's go ahead and write a test to show that the attacker can no longer claim -more than once using the new instructions. +more than once using the new instruction handlers. 
Notice that if you start to loop through using multiple PDAs like the old test, -you can't even pass the non-canonical bump to the instructions. However, you can -still loop through using the various PDAs and at the end check that only 1 claim -happened for a total of 10 tokens. Your final test will look something like -this: +you can't even pass the non-canonical bump to the instruction handlers. However, +you can still loop through using the various PDAs and at the end check that only +1 claim happened for a total of 10 tokens. Your final test will look something +like this: ```typescript -it.only("Attacker can only claim once with secure instructions", async () => { - const attacker = Keypair.generate(); - await safeAirdrop(attacker.publicKey, provider.connection); - const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); - const [userPDA] = findProgramAddressSync( - [attacker.publicKey.toBuffer()], - program.programId, - ); - - await program.methods - .createUserSecure() - .accounts({ - payer: attacker.publicKey, - }) - .signers([attacker]) - .rpc(); - - await program.methods - .claimSecure() - .accounts({ - payer: attacker.publicKey, - userAta: ataKey, - mint, - user: userPDA, - }) - .signers([attacker]) - .rpc(); - - let numClaims = 1; - - for (let i = 0; i < 256; i++) { - try { - const pda = createProgramAddressSync( - [attacker.publicKey.toBuffer(), Buffer.from([i])], - program.programId, - ); - await program.methods - .createUserSecure() - .accounts({ - user: pda, - payer: attacker.publicKey, - }) - .signers([attacker]) - .rpc(); - - await program.methods - .claimSecure() - .accounts({ - payer: attacker.publicKey, - userAta: ataKey, - mint, - user: pda, - }) - .signers([attacker]) - .rpc(); - - numClaims += 1; - } catch {} - } +it("allows attacker to claim only once with secure instruction handlers", async () => { + try { + const attacker = Keypair.generate(); + await airdropIfRequired( + connection, + attacker.publicKey, + 1 * 
LAMPORTS_PER_SOL, + 0.5 * LAMPORTS_PER_SOL, + ); + const ataKey = await getAssociatedTokenAddress(mint, attacker.publicKey); + const [userPDA] = anchor.web3.PublicKey.findProgramAddressSync( + [attacker.publicKey.toBuffer()], + program.programId, + ); + + await program.methods + .createUserSecure() + .accounts({ + payer: attacker.publicKey, + user: userPDA, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([attacker]) + .rpc(); + + await program.methods + .claimSecure() + .accounts({ + payer: attacker.publicKey, + user: userPDA, + userAta: ataKey, + mint, + mintAuthority, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + let successfulClaimCount = 1; + + for (let i = 0; i < 256; i++) { + try { + const pda = anchor.web3.PublicKey.createProgramAddressSync( + [attacker.publicKey.toBuffer(), Buffer.from([i])], + program.programId, + ); + await program.methods + .createUserSecure() + .accounts({ + user: pda, + payer: attacker.publicKey, + systemProgram: anchor.web3.SystemProgram.programId, + }) + .signers([attacker]) + .rpc(); + + await program.methods + .claimSecure() + .accounts({ + payer: attacker.publicKey, + user: pda, + userAta: ataKey, + mint, + mintAuthority, + tokenProgram: anchor.utils.token.TOKEN_PROGRAM_ID, + associatedTokenProgram: anchor.utils.token.ASSOCIATED_PROGRAM_ID, + systemProgram: anchor.web3.SystemProgram.programId, + rent: anchor.web3.SYSVAR_RENT_PUBKEY, + }) + .signers([attacker]) + .rpc(); + + successfulClaimCount += 1; + } catch (error) { + if ( + error instanceof Error && + !error.message.includes("Error Number: 2006") && + !error.message.includes( + "Invalid seeds, address must fall off the curve", + ) + ) { + // Comment console error logs to see the test outputs properly + console.error(error); + } + } + } + + const 
ata = await getAccount(connection, ataKey); - const ata = await getAccount(provider.connection, ataKey); + console.log( + `Attacker claimed ${successfulClaimCount} times and got ${Number( + ata.amount, + )} tokens`, + ); - expect(Number(ata.amount)).to.equal(10); - expect(numClaims).to.equal(1); + expect(Number(ata.amount)).to.equal(10); + expect(successfulClaimCount).to.equal(1); + } catch (error) { + throw new Error(`Test failed: ${error.message}`); + } }); ``` ```bash - bump-seed-canonicalization + Bump seed canonicalization Attacker claimed 119 times and got 1190 tokens - ✔ Attacker can claim more than reward limit with insecure instructions (128493ms) - ✔ Attacker can only claim once with secure instructions (1448ms) + ✔ allows attacker to claim more than reward limit with insecure instruction handlers (117370ms) +Attacker claimed 1 times and got 10 tokens + ✔ allows attacker to claim only once with secure instruction handlers (16362ms) ``` If you use Anchor for all of the PDA derivations, this particular exploit is @@ -594,8 +692,7 @@ pretty simple to avoid. However, if you end up doing anything "non-standard," be careful to design your program to explicitly use the canonical bump! If you want to take a look at the final solution code you can find it on the -`solution` branch of -[the same repository](https://github.com/Unboxed-Software/solana-bump-seed-canonicalization/tree/solution). +[`solution` branch of the same repository](https://github.com/solana-developers/bump-seed-canonicalization/tree/solution). ## Challenge @@ -609,6 +706,7 @@ Remember, if you find a bug or exploit in somebody else's program, please alert them! If you find one in your own program, be sure to patch it right away. + Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=d3f6ca7a-11c8-421f-b7a3-d6c08ef1aa8b)! 
diff --git a/content/courses/solana-pay/solana-pay.md b/content/courses/solana-pay/solana-pay.md index b8b8a0cf9..5d08a75e4 100644 --- a/content/courses/solana-pay/solana-pay.md +++ b/content/courses/solana-pay/solana-pay.md @@ -3,10 +3,9 @@ title: Solana Pay objectives: - Use the Solana Pay specification to build payment requests and initiate transactions using URLs encoded as QR codes - - Use the `@solana/pay` library to help with the creation of Solana Pay - transaction requests + - Use the `@solana/pay` library to create Solana Pay transaction requests - Partially sign transactions and implement transaction gating based on - certain conditions + specific conditions description: "How to create Solana Pay payment requests using links and QR codes." --- @@ -15,19 +14,19 @@ description: - **Solana Pay** is a specification for encoding Solana transaction requests within URLs, enabling standardized transaction requests across different - Solana apps and wallets -- **Partial signing** of transactions allows for the creation of transactions - that require multiple signatures before they are submitted to the network + Solana apps and wallets. +- **Partial signing** of transactions allows the creation of transactions that + require multiple signatures before they are submitted to the network. - **Transaction gating** involves implementing rules that determine whether - certain transactions are allowed to be processed or not, based on certain - conditions or the presence of specific data in the transaction + certain transactions are allowed to be processed, based on specific conditions + or the presence of particular data in the transaction. ## Lesson The Solana community is continually improving and expanding the network's -functionality. But that doesn't always mean developing brand new technology. +functionality. But that doesn't always mean developing brand-new technology. Sometimes it means leveraging the network's existing features in new and -interesting ways. 
+innovative ways. Solana Pay is a great example of this. Rather than adding new functionality to the network, Solana Pay uses the network's existing signing features in a unique @@ -38,17 +37,17 @@ Throughout this lesson, you'll learn how to use Solana Pay to create transfer and transaction requests, encode these requests as a QR code, partially sign transactions, and gate transactions based on conditions you choose. Rather than leaving it at that, we hope you'll see this as an example of leveraging existing -features in new and interesting ways, using it as a launching pad for your own +features in new and innovative ways, using it as a launching pad for your own unique client-side network interactions. ### Solana Pay The [Solana Pay specification](https://docs.solanapay.com/spec) is a set of standards that allow users to request payments and initiate transactions using -URLs in a uniform way across various Solana apps and wallets. +URLs uniformly across various Solana apps and wallets. Request URLs are prefixed with `solana:` so that platforms can direct the link -to the appropriate application. For example, on mobile a URL that starts with +to the appropriate application. For example, on mobile, a URL that starts with `solana:` will be directed to wallet applications that support the Solana Pay specification. From there, the wallet can use the remainder of the URL to appropriately handle the request. @@ -141,8 +140,8 @@ a function that handles the request and response. 
import { NextApiRequest, NextApiResponse } from "next"; export default async function handler( - req: NextApiRequest, - res: NextApiResponse, + request: NextApiRequest, + response: NextApiResponse, ) { // Handle the request } @@ -163,18 +162,17 @@ Building on the empty endpoint from before, that may look like this: import { NextApiRequest, NextApiResponse } from "next"; export default async function handler( - req: NextApiRequest, - res: NextApiResponse, + request: NextApiRequest, + response: NextApiResponse, ) { - if (req.method === "GET") { - return get(res); - } else { - return res.status(405).json({ error: "Method not allowed" }); + if (request.method === "GET") { + return get(response); } + return response.status(405).json({ error: "Method not allowed" }); } -function get(res: NextApiResponse) { - res.status(200).json({ +function get(response: NextApiResponse) { + response.status(200).json({ label: "Store Name", icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", }); @@ -205,26 +203,26 @@ transaction and return it to the wallet for signing by: import { NextApiRequest, NextApiResponse } from "next"; export default async function handler( - req: NextApiRequest, - res: NextApiResponse, + request: NextApiRequest, + response: NextApiResponse, ) { - if (req.method === "GET") { - return get(res); - } else if (req.method === "POST") { - return post(req, res); - } else { - return res.status(405).json({ error: "Method not allowed" }); + if (request.method === "GET") { + return get(response); } + if (request.method === "POST") { + return post(request, response); + } + return response.status(405).json({ error: "Method not allowed" }); } -function get(res: NextApiResponse) { - res.status(200).json({ +function get(response: NextApiResponse) { + response.status(200).json({ label: "Store Name", icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", }); } -async function post(req: PublicKey, res: PublicKey) { - const { account, reference } = req.body; 
+async function post(request: NextApiRequest, response: NextApiResponse) { + const { account, reference } = request.body; const connection = new Connection(clusterApiUrl("devnet")); @@ -236,19 +234,19 @@ async function post(req: PublicKey, res: PublicKey) { }); const instruction = SystemProgram.transfer({ - fromPubkey: account, + fromPubkey: new PublicKey(account), toPubkey: Keypair.generate().publicKey, lamports: 0.001 * LAMPORTS_PER_SOL, }); - transaction.add(instruction); - - transaction.keys.push({ + instruction.keys.push({ pubkey: reference, isSigner: false, isWritable: false, }); + transaction.add(instruction); + const serializedTransaction = transaction.serialize({ requireAllSignatures: false, }); @@ -256,7 +254,7 @@ async function post(req: PublicKey, res: PublicKey) { const message = "Simple transfer of 0.001 SOL"; - res.send(200).json({ + response.status(200).json({ transaction: base64, message, }); @@ -495,21 +493,21 @@ variable. The first thing we'll do in this file is the following: import { NextApiRequest, NextApiResponse } from "next"; export default async function handler( - req: NextApiRequest, - res: NextApiResponse, + request: NextApiRequest, + response: NextApiResponse, ) { - if (req.method === "GET") { - return get(res); - } else if (req.method === "POST") { - return await post(req, res); - } else { - return res.status(405).json({ error: "Method not allowed" }); + if (request.method === "GET") { + return get(response); } + if (request.method === "POST") { + return await post(request, response); + } + return response.status(405).json({ error: "Method not allowed" }); } -function get(res: NextApiResponse) {} +function get(response: NextApiResponse) {} -async function post(req: NextApiRequest, res: NextApiResponse) {} +async function post(request: NextApiRequest, response: NextApiResponse) {} ``` #### 4. Update `get` function @@ -519,8 +517,8 @@ endpoint to return a label and icon. 
Update the `get` function to send a response with a "Scavenger Hunt!" label and a Solana logo icon. ```jsx -function get(res: NextApiResponse) { - res.status(200).json({ +function get(response: NextApiResponse) { + response.status(200).json({ label: "Scavenger Hunt!", icon: "https://solana.com/src/img/branding/solanaLogoMark.svg", }); @@ -561,35 +559,31 @@ import { NextApiRequest, NextApiResponse } from "next" import { PublicKey, Transaction } from "@solana/web3.js" ... -async function post(req: NextApiRequest, res: NextApiResponse) { - const { account } = req.body - const { reference, id } = req.query - - if (!account || !reference || !id) { - res.status(400).json({ error: "Missing required parameter(s)" }) - return - } - - try { - const transaction = await buildTransaction( - new PublicKey(account), - new PublicKey(reference), - id.toString() - ) - - res.status(200).json({ - transaction: transaction, - message: `You've found location ${id}!`, - }) - } catch (err) { - console.log(err) - let error = err as any - if (error.message) { - res.status(200).json({ transaction: "", message: error.message }) - } else { - res.status(500).json({ error: "error creating transaction" }) - } - } +async function post(request: NextApiRequest, response: NextApiResponse) { + const { account } = request.body; + const { reference, id } = request.query; + + if (!account || !reference || !id) { + response.status(400).json({ error: "Missing required parameter(s)" }); + return; + } + + try { + const transaction = await buildTransaction( + new PublicKey(account), + new PublicKey(reference), + id.toString(), + ); + + response.status(200).json({ + transaction: transaction, + message: `You've found location ${id}!`, + }); + } catch (error) { + console.log(error); + response.status(500).json({ transaction: "", message: error.message }); + return; + } } async function buildTransaction( @@ -769,9 +763,8 @@ function verifyCorrectLocation( if (!lastLocation || currentLocation.index !== 
lastLocation.index + 1) { return false; - } else { - return true; } + return true; } ``` diff --git a/content/courses/state-compression/compressed-nfts.md b/content/courses/state-compression/compressed-nfts.md index e0a7ba59d..d196ffefa 100644 --- a/content/courses/state-compression/compressed-nfts.md +++ b/content/courses/state-compression/compressed-nfts.md @@ -1,27 +1,27 @@ --- title: Compressed NFTs objectives: - - Create a compressed NFT collection using Metaplex's Bubblegum program - - Mint compressed NFTs using the Bubblegum TS SDK - - Transfer compressed NFTs using the Bubblegum TS SDK + - Create a compressed NFT collection using Metaplex’s Bubblegum program + - Mint compressed NFTs using the Bubblegum program + - Transfer compressed NFTs using the Bubblegum program - Read compressed NFT data using the Read API description: "How to mint, transfer and read large-scale NFT collections using Metaplex's - Bubblegum SDK." + Bubblegum Program." --- ## Summary - **Compressed NFTs (cNFTs)** use **State Compression** to hash NFT data and store the hash onchain in an account using a **concurrent Merkle tree** - structure -- The cNFT data hash can't be used to infer the cNFT data, but it can be used to - **verify** if the cNFT data you're seeing is correct + structure. +- The cNFT data hash can’t be used to infer the cNFT data, but it can be used to + **verify** if the cNFT data you’re seeing is correct. - Supporting RPC providers **index** cNFT data offchain when the cNFT is minted so that you can use the **Read API** to access the data - The **Metaplex Bubblegum program** is an abstraction on top of the **State Compression** program that enables you to more simply create, mint, and manage - cNFT collections + cNFT collections. ## Lesson @@ -32,10 +32,10 @@ drastically reduces costs. Solana's transaction costs are so cheap that most users never think about how expensive minting NFTs can be at scale. 
The cost to set up and mint 1 million -traditional NFTs is approximately 24,000 SOL. By comparison, cNFTs can be -structured to where the same setup and mint costs 10 SOL or less. That means -anyone using NFTs at scale could cut costs by more than 1000x by using cNFTs -over traditional NFTs. +traditional NFTs using the Token Metadata Program is approximately 24,000 SOL. +By comparison, cNFTs can be structured to where the same setup and mint costs 10 +SOL or less. That means anyone using NFTs at scale could cut costs by more than +1000x by using cNFTs over traditional NFTs. However, cNFTs can be tricky to work with. Eventually, the tooling required to work with them will be sufficiently abstracted from the underlying technology @@ -47,16 +47,15 @@ pieces, so let's dig in! Most of the costs associated with traditional NFTs come down to account storage space. Compressed NFTs use a concept called State Compression to store data in -the blockchain's cheaper **ledger state**, using more expensive account space -only to store a “fingerprint”, or **hash**, of the data. This hash allows you to +the blockchain’s **ledger state**, only using the account state to store a +“fingerprint”, or **hash**, of the data. This hash allows you to cryptographically verify that data has not been tampered with. To both store hashes and enable verification, we use a special binary tree structure known as a **concurrent Merkle tree**. This tree structure lets us hash data together in a deterministic way to compute a single, final hash that -gets stored onchain. This final hash is significantly smaller in size than all -the original data combined, hence the “compression.” The steps to this process -are: +gets stored onchain. This final hash is significantly smaller than all the +original data combined, hence the “compression.” The steps to this process are: 1. Take any piece of data 2. Create a hash of this data @@ -80,11 +79,11 @@ track and index this data as the transactions occur. 
This ensures there is an offchain “cache” of the data that anyone can access and subsequently verify against the onchain root hash. -This process is _very complex_. We'll cover some of the key concepts below but -don't worry if you don't understand it right away. We'll talk more theory in the -state compression lesson and focus primarily on application to NFTs in this -lesson. You'll be able to work with cNFTs by the end of this lesson even if you -don't fully understand every piece of the state compression puzzle. +This process is _very complex_. We’ll cover some key concepts below but don’t +worry if you don’t understand it right away. We’ll talk more theory in the state +compression lesson and focus primarily on application to NFTs in this lesson. +You’ll be able to work with cNFTs by the end of this lesson even if you don’t +fully understand every piece of the state compression puzzle. #### Concurrent Merkle trees @@ -130,9 +129,8 @@ node adds 32 bytes to a transaction, so large trees would quickly exceed the maximum transaction size limit without caching proof nodes onchain. Each of these three values, max depth, max buffer size, and canopy depth, comes -with a tradeoff. Increasing the value of any of these values increases the size -of the account used to store the tree, thus increasing the cost to create the -tree. +with a tradeoff. Increasing any of these values increases the size of the +account used to store the tree, thus increasing the cost to create the tree. Choosing the max depth is fairly straightforward as it directly relates to the number of leafs and therefore the amount of data you can store. If you need @@ -183,8 +181,9 @@ the Noop instruction logs related to your data will vary based on the validator, but eventually you'll lose access to it if you're relying directly on instruction logs. -Technically, you _can_ replay transaction state back to the genesis block but -the average team isn't going to do that, and it certainly won't be performant. 
+Technically, you _can_ replay transaction state back to the genesis block, but +the average team isn’t going to do that, and it certainly won’t be performant. + Instead, you should use an indexer that will observe the events sent to the Noop program and store the relevant data off chain. That way you don't need to worry about old data becoming inaccessible. @@ -197,13 +196,8 @@ main point of this lesson: how to create a cNFT collection. Fortunately, you can use tools created by Solana Foundation, the Solana developer community, and Metaplex to simplify the process. Specifically, we'll be using the `@solana/spl-account-compression` SDK, the Metaplex Bubblegum -program, and the Bubblegum program's corresponding TS SDK -`@metaplex-foundation/mpl-bugglegum`. - - +program `@metaplex-foundation/mpl-bubblegum` through the Umi library from +Metaplex. #### Prepare metadata @@ -214,53 +208,56 @@ something like this: ```json { - "name": "12_217_47", - "symbol": "RGB", - "description": "Random RGB Color", - "seller_fee_basis_points": 0, - "image": "https://raw.githubusercontent.com/ZYJLiu/rgb-png-generator/master/assets/12_217_47/12_217_47.png", + "name": "My Collection", + "symbol": "MC", + "description": "My Collection description", + "image": "https://lvvg33dqzykc2mbfa4ifua75t73tchjnfjbcspp3n3baabugh6qq.arweave.net/XWpt7HDOFC0wJQcQWgP9n_cxHS0qQik9-27CAAaGP6E", "attributes": [ { - "trait_type": "R", - "value": "12" + "trait_type": "Background", + "value": "transparent" }, { - "trait_type": "G", - "value": "217" + "trait_type": "Shape", + "value": "sphere" }, { - "trait_type": "B", - "value": "47" + "trait_type": "Resolution", + "value": "1920x1920" } ] } ``` -Depending on your use case, you may be able to generate this dynamically or you -might want to have a JSON file prepared for each cNFT beforehand. 
You'll also -need any other assets referenced by the JSON, such as the `image` url shown in +Depending on your use case, you may be able to generate this dynamically, or you +might want to have a JSON file prepared for each cNFT beforehand. You’ll also +need any other assets referenced by the JSON, such as the `image` URL shown in the example above. #### Create Collection NFT -If you want your cNFTs to be part of a collection, you'll need to create a -Collection NFT **before** you start minting cNFTs. This is a traditional NFT -that acts as the reference binding your cNFTs together into a single collection. -You can create this NFT using the `@metaplex-foundation/js` library. Just make -sure you set `isCollection` to `true`. +NFTs are intrinsically unique, compared to fungible tokens which have a supply. +However, it is important to bind NFTs produced by the same series together, +using a Collection. Collections allow people to discover other NFTs in the same +collection, and verify that individual NFTs are actually members of the +Collection (and not look-alikes produced by someone else). + +To have your cNFTs be part of a collection, you’ll need to create a +Collection NFT **before** you start minting cNFTs. This is a traditional Token +Metadata Program NFT that acts as the reference binding your cNFTs together into +a single collection.
The procedure to create this NFT is outlined in our +[NFTs with Metaplex lesson](https://solana.com/developers/courses/tokens-and-nfts/nfts-with-metaplex#add-the-nft-to-a-collection) ```typescript -const collectionNft = await metaplex.nfts().create({ - uri: someUri, - name: "Collection NFT", - sellerFeeBasisPoints: 0, - updateAuthority: somePublicKey, - mintAuthority: somePublicKey, - tokenStandard: 0, - symbol: "Collection", - isMutable: true, - isCollection: true, -}); +const collectionMint = generateSigner(umi); + +await createNft(umi, { + mint: collectionMint, + name: `My Collection`, + uri, + sellerFeeBasisPoints: percentAmount(0), + isCollection: true, // mint as collection NFT +}).sendAndConfirm(umi); ``` #### Create Merkle tree Account @@ -326,215 +323,151 @@ is 32 bytes it's possible to max out transaction sizes very quickly. For example, if your tree has a very low canopy depth, an NFT marketplace may only be able to support simple NFTs transfers rather than support an onchain -bidding system for your cNFTs. The canopy effectively caches proof nodes onchain -so you don't have to pass all of them into the transaction, allowing for more -complex transactions. +bidding system for your cNFTs. The canopy effectively caches proof nodes +onchain, so you don’t have to pass all of them into the transaction, allowing +for more complex transactions. Increasing any of these three values increases the size of the account, thereby increasing the cost associated with creating it. Weigh the benefits accordingly when choosing the values. -Once you know these values, you can use the `createAllocTreeIx` helper function -from the `@solana/spl-account-compression` TS SDK to create the instruction for -creating the empty account. +Once you know these values, you can use the `createTree` method from the +`@metaplex-foundation/mpl-bubblegum` package to create your tree. 
This +instruction creates and initializes two accounts: -```typescript -import { createAllocTreeIx } from "@solana/spl-account-compression" +1. A `Merkle Tree` account - this holds the merkle hash and is used to verify + the authenticity of data stored. + +2. A `Tree Config` account - this holds additional data specific to compressed + NFTs such as the tree creator, whether the tree is public, and + [other fields - see the Bubblegum program source](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/state/mod.rs#L17). -const treeKeypair = Keypair.generate() +#### Setting up Umi -const allocTreeIx = await createAllocTreeIx( - connection, - treeKeypair.publicKey, - payer.publicKey, - { maxDepth: 20; maxBufferSize: 256 }, - canopyDepth -) +The `mpl-bubblegum` package is a plugin and cannot be used without the Umi +library from Metaplex. Umi is a framework for making JS/TS clients for onchain +programs that was created by Metaplex. + +Note that Umi has different implementations for many concepts than web3.js, +including Keypairs, PublicKeys, and Connections. However, it is easy to convert +from web3.js versions of these items to the Umi equivalents. + +To get started, we need to create an Umi instance + +```typescript +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); ``` -Note that this is simply a helper function for calculating the size required by -the account and creating the instruction to send to the System Program for -allocating the account. This function doesn't interact with any -compression-specific programs yet. +The above code initializes an empty Umi instance without any signer or plugin +attached to it.
You can find the exhaustive list of the plugins available +[on this Metaplex docs page](https://developers.metaplex.com/umi/metaplex-umi-plugins) -#### Use Bubblegum to Initialize Your Tree +The next part is to add in our imports and attach a signer to our Umi instance. -With the empty tree account created, you then use the Bubblegum program to -initialize the tree. In addition to the Merkle tree account, Bubblegum creates a -tree config account to add cNFT-specific tracking and functionality. +```typescript +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { createTree, mplBubblegum } from "@metaplex-foundation/mpl-bubblegum"; +import { keypairIdentity } from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); + +console.log("Loaded UMI with Bubblegum"); +``` + +#### Use Bubblegum to Initialize Your Tree -Version 0.7 of the `@metaplex-foundation/mpl-bubblegum` TS SDK provides the -helper function `createCreateTreeInstruction` for calling the `create_tree` -instruction on the Bubblegum program. As part of the call, you'll need to derive -the `treeAuthority` PDA expected by the program. This PDA uses the tree's -address as a seed. 
+With Umi instantiated, we are ready to call the `createTree` method to +instantiate the Merkle tree and tree config accounts. ```typescript -import { - createAllocTreeIx, - SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - SPL_NOOP_PROGRAM_ID, -} from "@solana/spl-account-compression" -import { - PROGRAM_ID as BUBBLEGUM_PROGRAM_ID, - createCreateTreeInstruction, -} from "@metaplex-foundation/mpl-bubblegum" - -... - -const [treeAuthority, _bump] = PublicKey.findProgramAddressSync( - [treeKeypair.publicKey.toBuffer()], - BUBBLEGUM_PROGRAM_ID -) - -const createTreeIx = createCreateTreeInstruction( - { - treeAuthority, - merkleTree: treeKeypair.publicKey, - payer: payer.publicKey, - treeCreator: payer.publicKey, - logWrapper: SPL_NOOP_PROGRAM_ID, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - }, - { - maxBufferSize: 256, - maxDepth: 20, - public: false, - }, - BUBBLEGUM_PROGRAM_ID -) +const merkleTree = generateSigner(umi); +const builder = await createTree(umi, { + merkleTree, + maxDepth: 14, + maxBufferSize: 64, +}); +await builder.sendAndConfirm(umi); ``` -The list below shows the required input for this helper function: - -- `accounts` - An object representing the accounts required by the instruction. - This includes: - - `treeAuthority` - Bubblegum expects this to be a PDA derived using the - Merkle tree address as a seed - - `merkleTree` - The Merkle tree account - - `payer` - The address paying for transaction fees, rent, etc. - - `treeCreator` - The address to list as the tree creator - - `logWrapper` - The program to use to expose the data to indexers through - logs; this should be the address of the SPL Noop program unless you have - some other custom implementation - - `compressionProgram` - The compression program to use for initializing the - Merkle tree; this should be the address of the SPL State Compression program - unless you have some other custom implementation -- `args` - An object representing additional arguments required by the - instruction. 
This includes: - - `maxBufferSize` - The max buffer size of the Merkle tree - - `maxDepth` - The max depth of the Merkle tree - - `public` - When set to `true`, anyone will be able to mint cNFTs from the - tree; when set to `false`, only the tree creator or tree delegate will be - able to min cNFTs from the tree - -When submitted, this will invoke the `create_tree` instruction on the Bubblegum -program. This instruction does three things: - -1. Creates the tree config PDA account -2. Initializes the tree config account with appropriate initial values -3. Issues a CPI to the State Compression program to initialize the empty Merkle - tree account - -Feel free to take a look at the program code -[here](https://github.com/metaplex-foundation/mpl-bubblegum/blob/main/programs/bubblegum/program/src/lib.rs#L887). +The three values supplied i.e. the `merkleTree`, `maxDepth` and `maxBufferSize` +are required in order to create the tree while the rest are optional. For +example, the`tree creator` defaults to the Umi instance identity, while the +`public field to false. + +When set to true, `public` allows anyone to mint from the initialized tree and +if false, only the tree creator will be able to mint from the tree. + +Feel free to look at the code for the +[create_tree instruction handler](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/processor/create_tree.rs#L40) +and +[create_tree's expected accounts](https://github.com/metaplex-foundation/mpl-bubblegum/blob/42ffed35da6b2a673efacd63030a360eac3ae64e/programs/bubblegum/program/src/processor/create_tree.rs#L20). #### Mint cNFTs With the Merkle tree account and its corresponding Bubblegum tree config account -initialized, it's possible to mint cNFTs to the tree. The Bubblegum instruction -to use will be either `mint_v1` or `mint_to_collection_v1`, depending on whether -or not you want to the minted cNFT to be part of a collection. 
+initialized, it’s possible to mint cNFTs to the tree. The Bubblegum library, +provides two instructions we can make use of depending on whether the minted +asset will belong to a collection. -Version 0.7 of the `@metaplex-foundation/mpl-bubblegum` TS SDK provides helper -functions `createMintV1Instruction` and `createMintToCollectionV1Instruction` to -make it easier for you to create the instructions. +The two instructions are -Both functions will require you to pass in the NFT metadata and a list of -accounts required to mint the cNFT. Below is an example of minting to a -collection: +1. **MintV1** ```typescript -const mintWithCollectionIx = createMintToCollectionV1Instruction( - { - payer: payer.publicKey, - merkleTree: treeAddress, - treeAuthority, - treeDelegate: payer.publicKey, - leafOwner: destination, - leafDelegate: destination, - collectionAuthority: payer.publicKey, - collectionAuthorityRecordPda: BUBBLEGUM_PROGRAM_ID, - collectionMint: collectionDetails.mint, - collectionMetadata: collectionDetails.metadata, - editionAccount: collectionDetails.masterEditionAccount, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - logWrapper: SPL_NOOP_PROGRAM_ID, - bubblegumSigner, - tokenMetadataProgram: TOKEN_METADATA_PROGRAM_ID, - }, - { - metadataArgs: Object.assign(nftMetadata, { - collection: { key: collectionDetails.mint, verified: false }, - }), +await mintV1(umi, { + leafOwner, + merkleTree, + metadata: { + name: "My Compressed NFT", + uri: "https://example.com/my-cnft.json", + sellerFeeBasisPoints: 0, // 0% + collection: none(), + creators: [ + { address: umi.identity.publicKey, verified: false, share: 100 }, + ], }, -); +}).sendAndConfirm(umi); ``` -Notice that there are two arguments for the helper function: `accounts` and -`args`. The `args` parameter is simply the NFT metadata, while `accounts` is an -object listing the accounts required by the instruction. 
There are admittedly a -lot of them: - -- `payer` - the account that will pay for the transaction fees, rent, etc. -- `merkleTree` - the Merkle tree account -- `treeAuthority` - the tree authority; should be the same PDA you derived - previously -- `treeDelegate` - the tree delegate; this is usually the same as the tree - creator -- `leafOwner` - the desired owner of the compressed NFT being minted -- `leafDelegate` - the desired delegate of the compressed NFT being minted; this - is usually the same as the leaf owner -- `collectionAuthority` - the authority of the collection NFT -- `collectionAuthorityRecordPda` - optional collection authority record PDA; - there typically is none, in which case you should put the Bubblegum program - address -- `collectionMint` - the mint account for the collection NFT -- `collectionMetadata` - the metadata account for the collection NFT -- `editionAccount` - the master edition account of the collection NFT -- `compressionProgram` - the compression program to use; this should be the - address of the SPL State Compression program unless you have some other custom - implementation -- `logWrapper` - the program to use to expose the data to indexers through logs; - this should be the address of the SPL Noop program unless you have some other - custom implementation -- `bubblegumSigner` - a PDA used by the Bubblegrum program to handle collection - verification -- `tokenMetadataProgram` - the token metadata program that was used for the - collection NFT; this is usually always the Metaplex Token Metadata program - -Minting without a collection requires fewer accounts, none of which are -exclusive to minting without a collection. You can take a look at the example -below. +2. 
**mintToCollectionV1** ```typescript -const mintWithoutCollectionIx = createMintV1Instruction( - { - payer: payer.publicKey, - merkleTree: treeAddress, - treeAuthority, - treeDelegate: payer.publicKey, - leafOwner: destination, - leafDelegate: destination, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - logWrapper: SPL_NOOP_PROGRAM_ID, - }, - { - message: nftMetadata, +await mintToCollectionV1(umi, { + leafOwner, + merkleTree, + collectionMint, + metadata: { + name: "My Compressed NFT", + uri: "https://example.com/my-cnft.json", + sellerFeeBasisPoints: 0, // 0% + collection: { key: collectionMint, verified: false }, + creators: [ + { address: umi.identity.publicKey, verified: false, share: 100 }, + ], }, -); +}).sendAndConfirm(umi); ``` +Both functions will require you to pass in the NFT metadata and a list of +accounts required to mint the cNFT such as the `leafOwner`, `merkleTree` account +etc. + ### Interact with cNFTs It's important to note that cNFTs _are not_ SPL tokens. That means your code @@ -544,17 +477,26 @@ fetching, querying, transferring, etc. #### Fetch cNFT data The simplest way to fetch data from an existing cNFT is to use the -[Digital Asset Standard Read API](https://solana.com/developers/guides/javascript/compressed-nfts#reading-compressed-nfts-metadata) -(Read API). Note that this is separate from the standard JSON RPC. To use the -Read API, you'll need to use a supporting RPC Provider. Metaplex maintains a -(likely non-exhaustive) -[list of RPC providers](https://developers.metaplex.com/bubblegum/rpcs) that -support the Read API. In this lesson we'll be using +[Digital Asset Standard Read API](https://developers.metaplex.com/das-api) (Read +API). Note that this is separate from the standard JSON RPC. To use the Read +API, you’ll need to use a supporting RPC Provider. 
Metaplex maintains a (likely +non-exhaustive) +[list of RPC providers that support the DAS Read API](https://developers.metaplex.com/rpc-providers#rpcs-with-das-support). + +In this lesson we’ll be using [Helius](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api) as they have free support for Devnet. -To use the Read API to fetch a specific cNFT, you need to have the cNFT's asset -ID. However, after minting cNFTs, you'll have at most two pieces of information: +You might need to update your RPC connection endpoint in the Umi instantiation + +```typescript +const umi = createUmi( + "https://devnet.helius-rpc.com/?api-key=YOUR-HELIUS-API-KEY", +); +``` + +To use the Read API to fetch a specific cNFT, you need to have the cNFT’s asset +ID. However, after minting cNFTs, you’ll have at most two pieces of information: 1. The transaction signature 2. The leaf index (possibly) @@ -562,8 +504,8 @@ ID. However, after minting cNFTs, you'll have at most two pieces of information: The only real guarantee is that you'll have the transaction signature. It is **possible** to locate the leaf index from there, but it involves some fairly complex parsing. The short story is you must retrieve the relevant instruction -logs from the Noop program and parse them to find the leaf index. We'll cover -this more in depth in a future lesson. For now, we'll assume you know the leaf +logs from the `Noop program` and parse them to find the leaf index. We’ll cover +this more in depth in a future lesson. For now, we’ll assume you know the leaf index. This is a reasonable assumption for most mints given that the minting will be @@ -579,32 +521,32 @@ ID and the following seeds: 2. The Merkle tree address 3. The leaf index -The indexer essentially observes transaction logs from the Noop program as they -happen and stores the cNFT metadata that was hashed and stored in the Merkle -tree. This enables them to surface that data when requested. 
This asset id is -what the indexer uses to identify the particular asset. +The indexer essentially observes transaction logs from the `Noop program` as +they happen and stores the cNFT metadata that was hashed and stored in the +Merkle tree. This enables them to surface that data when requested. This asset +ID is what the indexer uses to identify the particular asset. -For simplicity, you can just use the `getLeafAssetId` helper function from the -Bubblegum SDK. With the asset ID, fetching the cNFT is fairly straightforward. -Simply use the `getAsset` method provided by the supporting RPC provider: +For simplicity, you can just use the `findLeafAssetIdPda` helper function from +the Bubblegum library. ```typescript -const assetId = await getLeafAssetId(treeAddress, new BN(leafIndex)); -const response = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAsset", - params: { - id: assetId, - }, - }), +const [assetId, bump] = await findLeafAssetIdPda(umi, { + merkleTree, + leafIndex, +}); +``` + +With the asset ID, fetching the cNFT is fairly straightforward. Simply use the +`getAsset` method provided by the supporting RPC provider and the `dasApi` +library: + +```typescript +const [assetId, bump] = await findLeafAssetIdPda(umi, { + merkleTree, + leafIndex, }); -const { result } = await response.json(); -console.log(JSON.stringify(result, null, 2)); +const rpcAsset = await umi.rpc.getAsset(assetId); ``` This will return a JSON object that is comprehensive of what a traditional NFT's @@ -662,138 +604,17 @@ In broad terms, this involves a five step process: 4. Prepare the asset proof as a list of `AccountMeta` objects 5. Build and send the Bubblegum transfer instruction -The first two steps are very similar. Using your supporting RPC provider, use -the `getAsset` and `getAssetProof` methods to fetch the asset data and proof, -respectively. 
+Fortunately, we can make use of the `transfer` method which takes care of all +these steps. ```typescript -const assetDataResponse = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAsset", - params: { - id: assetId, - }, - }), -}); -const assetData = (await assetDataResponse.json()).result; - -const assetProofResponse = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAssetProof", - params: { - id: assetId, - }, - }), -}); -const assetProof = (await assetProofResponse.json()).result; -``` - -The third step is to fetch the Merkle tree account. The simplest way to do this -is using the `ConcurrentMerkleTreeAccount` type from -`@solana/spl-account-compression`: +const assetWithProof = await getAssetWithProof(umi, assetId); -```typescript -const treePublicKey = new PublicKey(assetData.compression.tree); - -const treeAccount = await ConcurrentMerkleTreeAccount.fromAccountAddress( - connection, - treePublicKey, -); -``` - -Step four is the most conceptually challenging step. Using the three pieces of -information gathered, you'll need to assemble the proof path for the cNFT's -corresponding leaf. The proof path is represented as accounts passed to the -program instruction. The program uses each of the account addresses as proof -nodes to prove the leaf data is what you say it is. - -The full proof is provided by the indexer as shown above in `assetProof`. -However, you can exclude the same number of tail-end accounts from the proof as -the depth of the canopy. 
- -```typescript -const canopyDepth = treeAccount.getCanopyDepth() || 0; - -const proofPath: AccountMeta[] = assetProof.proof - .map((node: string) => ({ - pubkey: new PublicKey(node), - isSigner: false, - isWritable: false, - })) - .slice(0, assetProof.proof.length - canopyDepth); -``` - -Finally, you can assemble the transfer instruction. The instruction helper -function, `createTransferInstruction`, requires the following arguments: - -- `accounts` - a list of instruction accounts, as expected; they are as follows: - - `merkleTree` - the Merkle tree account - - `treeAuthority` - the Merkle tree authority - - `leafOwner` - the owner of the leaf (cNFT) in question - - `leafDelegate` - the delegate of the leaf (cNFT) in question; if no delegate - has been added then this should be the same as `leafOwner` - - `newLeafOwner` - the address of the new owner post-transfer - - `logWrapper` - the program to use to expose the data to indexers through - logs; this should be the address of the SPL Noop program unless you have - some other custom implementation - - `compressionProgram` - the compression program to use; this should be the - address of the SPL State Compression program unless you have some other - custom implementation - - `anchorRemainingAccounts` - this is where you add the proof path -- `args` - additional arguments required by the instruction; they are: - - `root` - the root Merkle tree node from the asset proof; this is provided by - the indexer as a string and must be converted to bytes first - - `dataHash` - the hash of the asset data retrieved from the indexer; this is - provided by the indexer as a string and must be converted to bytes first - - `creatorHash` - the hash of the cNFT creator as retrieved from the indexer; - this is provided by the indexer as a string and must be converted to bytes - first - - `nonce` - used to ensure that no two leafs have the same hash; this value - should be the same as `index` - - `index` - the index where the cNFT's 
leaf is located on the Merkle tree - -An example of this is shown below. Note that the first 3 lines of code grab -additional information nested in the objects shown previously so they are ready -to go when assembling the instruction itself. - -```typescript -const treeAuthority = treeAccount.getAuthority(); -const leafOwner = new PublicKey(assetData.ownership.owner); -const leafDelegate = assetData.ownership.delegate - ? new PublicKey(assetData.ownership.delegate) - : leafOwner; - -const transferIx = createTransferInstruction( - { - merkleTree: treePublicKey, - treeAuthority, - leafOwner, - leafDelegate, - newLeafOwner: receiver, - logWrapper: SPL_NOOP_PROGRAM_ID, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - anchorRemainingAccounts: proofPath, - }, - { - root: [...new PublicKey(assetProof.root.trim()).toBytes()], - dataHash: [ - ...new PublicKey(assetData.compression.data_hash.trim()).toBytes(), - ], - creatorHash: [ - ...new PublicKey(assetData.compression.creator_hash.trim()).toBytes(), - ], - nonce: assetData.compression.leaf_id, - index: assetData.compression.leaf_id, - }, -); +await transfer(umi, { + ...assetWithProof, + leafOwner: currentLeafOwner, + newLeafOwner: newLeafOwner.publicKey, +}).sendAndConfirm(umi); ``` ### Conclusion @@ -803,14 +624,8 @@ fully comprehensive. You can also use Bubblegum to do things like burn, verify, delegate, and more. We won't go through these, but these instructions are similar to the mint and transfer process. If you need this additional functionality, take a look at the -[Bubblegum client source code](https://github.com/metaplex-foundation/mpl-bubblegum/tree/main/clients/js-solita) -and leverage the helper functions it provides. - -Keep in mind that compression is fairly new. Available tooling will evolve -rapidly but the principles you've learned in this lesson will likely remain the -same. 
These principles can also be broadened to arbitrary state compression, so -be sure to master them here so you're ready for more fun stuff in future -lessons! +[Bubblegum docs](https://developers.metaplex.com/bubblegum) on how to leverage +the helper functions it provides. ## Lab @@ -818,542 +633,400 @@ Let's jump in and practice creating and working with cNFTs. Together, we'll build as simple a script as possible that will let us mint a cNFT collection from a Merkle tree. -#### 1. Get the starter code +#### 1. Create a new project + +To begin create and initialize an empty NPM project and change directory into +it. -First things first, clone the starter code from the `starter` branch of our -[cNFT lab repository](https://github.com/Unboxed-Software/solana-cnft-demo). +```bash +mkdir cnft-demo +npm init -y +cd cnft-demo +``` -`git clone https://github.com/Unboxed-Software/solana-cnft-demo.git` +Install all the required dependencies -`cd solana-cnft-demo` +```bash +npm i @solana/web3.js @solana-developers/helpers@2.5.2 @metaplex-foundation/mpl-token-metadata @metaplex-foundation/mpl-bubblegum @metaplex-foundation/digital-asset-standard-api @metaplex-foundation/umi-bundle-defaults -`npm install` +npm i --save-dev esrun +``` -Take some time to familiarize yourself with the starter code provided. Most -important are the helper functions provided in `utils.ts` and the URIs provided -in `uri.ts`. +In this first script, we will learn about creating a tree, hence let's create +the file `create-tree.ts` -The `uri.ts` file provides 10k URIs that you can use for the offchain portion of -your NFT metadata. You can, of course, create your own metadata. But this lesson -isn't explicitly about preparing metadata so we've provided some for you. +```bash +mkdir src && touch src/create-tree.ts +``` -The `utils.ts` file has a few helper functions to keep you from writing more -unnecessary boilerplate than you need to. 
They are as follows: +This Umi instantiation code will be repeated in a lot of files, so feel free to +create a wrapper file to instantiate it: -- `getOrCreateKeypair` will create a new keypair for you and save it to a `.env` - file, or if there's already a private key in the `.env` file it will - initialize a keypair from that. -- `airdropSolIfNeeded` will airdrop some Devnet SOL to a specified address if - that address's balance is below 1 SOL. -- `createNftMetadata` will create the NFT metadata for a given creator public - key and index. The metadata it's getting is just dummy metadata using the URI - corresponding to the provided index from the `uri.ts` list of URIs. -- `getOrCreateCollectionNFT` will fetch the collection NFT from the address - specified in `.env` or if there is none it will create a new one and add the - address to `.env`. +```typescript filename="create-tree.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { createTree, mplBubblegum } from "@metaplex-foundation/mpl-bubblegum"; +import { generateSigner, keypairIdentity } from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl } from "@solana/web3.js"; -Finally, there's some boilerplate in `index.ts` that calls creates a new Devnet -connection, calls `getOrCreateKeypair` to initialize a “wallet,” and calls -`airdropSolIfNeeded` to fund the wallet if its balance is low. +const umi = createUmi(clusterApiUrl("devnet")); -We will be writing all of our code in the `index.ts`. 
+// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); +``` + +In the code above, we load the user's keypair wallet from the system wallet +located at `.config/solana/id.json`, instantiate a new Umi instance and assign +the keypair to it. We also assign the Bubblegum and dasApi plugins to it as +well. #### 2. Create the Merkle tree account -We'll start by creating the Merkle tree account. Let's encapsulate this in a -function that will eventually create _and_ initialize the account. We'll put it -below our `main` function in `index.ts`. Let's call it -`createAndInitializeTree`. For this function to work, it will need the following -parameters: - -- `connection` - a `Connection` to use for interacting with the network. -- `payer` - a `Keypair` that will pay for transactions. -- `maxDepthSizePair` - a `ValidDepthSizePair`. This type comes from - `@solana/spl-account-compression`. It's a simple object with properties - `maxDepth` and `maxBufferSize` that enforces a valid combination of the two - values. -- `canopyDepth` - a number for the canopy depth In the body of the function, - we'll generate a new address for the tree, then create the instruction for - allocating a new Merkle tree account by calling `createAllocTreeIx` from - `@solana/spl-account-compression`. +We’ll start by creating the Merkle tree account. To do this we will use the +`createTree` method from Metaplex Bubblegum program. 
-```typescript -async function createAndInitializeTree( - connection: Connection, - payer: Keypair, - maxDepthSizePair: ValidDepthSizePair, - canopyDepth: number, -) { - const treeKeypair = Keypair.generate(); - - const allocTreeIx = await createAllocTreeIx( - connection, - treeKeypair.publicKey, - payer.publicKey, - maxDepthSizePair, - canopyDepth, - ); -} +This function takes in three default values + +- `merkleTree` - The Merkle tree account address +- `maxDepth` - Determines the max number of leaves the tree will hold and + therefore the max number of cNFTs that the tree can contain. +- `maxBufferSize` - Determines how many concurrent changes can occur in the tree + in parallel. + +You can also supply in optional fields such as + +- `treeCreator` - The address of the tree authority, defaults to current + `umi.identity` instance. +- `public` - Determines whether anyone else apart from the tree creator will be + able to mint cNFTs from the tree. + +```typescript filename="create-tree.ts" +const merkleTree = generateSigner(umi); +const builder = await createTree(umi, { + merkleTree, + maxDepth: 14, + maxBufferSize: 64, +}); +await builder.sendAndConfirm(umi); + +let explorerLink = getExplorerLink("address", merkleTree.publicKey, "devnet"); +console.log(`Explorer link: ${explorerLink}`); +console.log("Merkle tree address is :", merkleTree.publicKey); +console.log("✅ Finished successfully!"); ``` -#### 3. Use Bubblegum to initialize the Merkle tree and create the tree config account - -With the instruction for creating the tree ready to go, we can create an -instruction for invoking `create_tree` on the Bubblegum program. This will -initialize the Merkle tree account _and_ create a new tree config account on the -Bubblegum program. 
- -This instruction needs us to provide the following: - -- `accounts` - an object of required accounts; this includes: - - `treeAuthority` - this should be a PDA derived with the Merkle tree address - and the Bubblegum program - - `merkleTree` - the address of the Merkle tree - - `payer` - the transaction fee payer - - `treeCreator` - the address of the tree creator; we'll make this the same as - `payer` - - `logWrapper` - make this the `SPL_NOOP_PROGRAM_ID` - - `compressionProgram` - make this the `SPL_ACCOUNT_COMPRESSION_PROGRAM_ID` -- `args` - a list of instruction arguments; this includes: - - `maxBufferSize` - the buffer size from our function's `maxDepthSizePair` - parameter - - `maxDepth` - the max depth from our function's `maxDepthSizePair` parameter - - `public` - whether or no the tree should be public; we'll set this to - `false` - -Finally, we can add both instructions to a transaction and submit the -transaction. Keep in mind that the transaction needs to be signed by both the -`payer` and the `treeKeypair`. 
+Run the `create-tree.ts` script using esrun -```typescript -async function createAndInitializeTree( - connection: Connection, - payer: Keypair, - maxDepthSizePair: ValidDepthSizePair, - canopyDepth: number, -) { - const treeKeypair = Keypair.generate(); - - const allocTreeIx = await createAllocTreeIx( - connection, - treeKeypair.publicKey, - payer.publicKey, - maxDepthSizePair, - canopyDepth, - ); - - const [treeAuthority, _bump] = PublicKey.findProgramAddressSync( - [treeKeypair.publicKey.toBuffer()], - BUBBLEGUM_PROGRAM_ID, - ); - - const createTreeIx = createCreateTreeInstruction( - { - treeAuthority, - merkleTree: treeKeypair.publicKey, - payer: payer.publicKey, - treeCreator: payer.publicKey, - logWrapper: SPL_NOOP_PROGRAM_ID, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - }, - { - maxBufferSize: maxDepthSizePair.maxBufferSize, - maxDepth: maxDepthSizePair.maxDepth, - public: false, - }, - ); - - const tx = new Transaction().add(allocTreeIx, createTreeIx); - tx.feePayer = payer.publicKey; - - try { - const txSignature = await sendAndConfirmTransaction( - connection, - tx, - [treeKeypair, payer], - { - commitment: "confirmed", - skipPreflight: true, - }, - ); - - console.log(`https://explorer.solana.com/tx/${txSignature}?cluster=devnet`); - - console.log("Tree Address:", treeKeypair.publicKey.toBase58()); - - return treeKeypair.publicKey; - } catch (err: any) { - console.error("\nFailed to create Merkle tree:", err); - throw err; - } -} +```bash +npx esrun create-tree.ts ``` -If you want to test what you have so far, feel free to call -`createAndInitializeTree` from `main` and provide small values for the max depth -and max buffer size. +Make sure to remember the Merkle tree address as we will be using it in the next +step when minting compressed NFTs. 
-```typescript -async function main() { - const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - const wallet = await getOrCreateKeypair("Wallet_1"); - await airdropSolIfNeeded(wallet.publicKey); - - const maxDepthSizePair: ValidDepthSizePair = { - maxDepth: 3, - maxBufferSize: 8, - }; - - const canopyDepth = 0; - - const treeAddress = await createAndInitializeTree( - connection, - wallet, - maxDepthSizePair, - canopyDepth, - ); -} +Your output will be similar to this + +```bash +Explorer link: https://explorer.solana.com/address/ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h?cluster=devnet +Merkle tree address is : ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h +✅ Finished successfully! ``` -Keep in mind that Devnet SOL is throttled so if you test too many times you -might run out of Devnet SOL before we get to minting. To test, in your terminal -run the following: +Congratulations! You've created a Bubblegum tree. Follow the Explorer link to +make sure that the process finished successfully, -`npm run start` +![Solana Explorer with details about created Merkle tree](/public/assets/courses/unboxed/solana-explorer-create-tree.png) -#### 4. Mint cNFTs to your tree +#### 3. Mint cNFTs to your tree Believe it or not, that's all you needed to do to set up your tree to compressed NFTs! Now let's turn our attention to minting. -First, let's declare a function called `mintCompressedNftToCollection`. It will -need the following parameters: - -- `connection` - a `Connection` to use for interacting with the network. -- `payer` - a `Keypair` that will pay for transactions. -- `treeAddress` - the Merkle tree's address -- `collectionDetails` - the details of the collection as type - `CollectionDetails` from `utils.ts` -- `amount` - the number of cNFTs to mint - -The body of this function will do the following: - -1. Derive the tree authority just like before. Again, this is a PDA derived from - the Merkle tree address and the Bubblegum program. -2. 
Derive the `bubblegumSigner`. This is a PDA derived from the string - `"collection_cpi"` and the Bubblegum program and is essential for minting to - a collection. -3. Create the cNFT metadata by calling `createNftMetadata` from our `utils.ts` - file. -4. Create the mint instruction by calling `createMintToCollectionV1Instruction` - from the Bubblegum SDK. -5. Build and send a transaction with the mint instruction -6. Repeat steps 3-6 `amount` number of times - -The `createMintToCollectionV1Instruction` takes two arguments: `accounts` and -`args`. The latter is simply the NFT metadata. As with all complex instructions, -the primary hurdle is knowing which accounts to provide. So let's go through -them real quick: - -- `payer` - the account that will pay for the transaction fees, rent, etc. -- `merkleTree` - the Merkle tree account -- `treeAuthority` - the tree authority; should be the same PDA you derived - previously -- `treeDelegate` - the tree delegate; this is usually the same as the tree - creator -- `leafOwner` - the desired owner of the compressed NFT being minted -- `leafDelegate` - the desired delegate of the compressed NFT being minted; this - is usually the same as the leaf owner -- `collectionAuthority` - the authority of the collection NFT -- `collectionAuthorityRecordPda` - optional collection authority record PDA; - there typically is none, in which case you should put the Bubblegum program - address -- `collectionMint` - the mint account for the collection NFT -- `collectionMetadata` - the metadata account for the collection NFT -- `editionAccount` - the master edition account of the collection NFT -- `compressionProgram` - the compression program to use; this should be the - address of the SPL State Compression program unless you have some other custom - implementation -- `logWrapper` - the program to use to expose the data to indexers through logs; - this should be the address of the SPL Noop program unless you have some other - custom 
implementation -- `bubblegumSigner` - a PDA used by the Bubblegrum program to handle collection - verification -- `tokenMetadataProgram` - the token metadata program that was used for the - collection NFT; this is usually always the Metaplex Token Metadata program - -When you put it all together, this is what it'll look like: +First, let's create a new file called `mint-compressed-nft-to-collection.ts`, +add our imports and instantiate Umi -```typescript -async function mintCompressedNftToCollection( - connection: Connection, - payer: Keypair, - treeAddress: PublicKey, - collectionDetails: CollectionDetails, - amount: number, -) { - // Derive the tree authority PDA ('TreeConfig' account for the tree account) - const [treeAuthority] = PublicKey.findProgramAddressSync( - [treeAddress.toBuffer()], - BUBBLEGUM_PROGRAM_ID, - ); - - // Derive the bubblegum signer, used by the Bubblegum program to handle "collection verification" - // Only used for `createMintToCollectionV1` instruction - const [bubblegumSigner] = PublicKey.findProgramAddressSync( - [Buffer.from("collection_cpi", "utf8")], - BUBBLEGUM_PROGRAM_ID, - ); - - for (let i = 0; i < amount; i++) { - // Compressed NFT Metadata - const compressedNFTMetadata = createNftMetadata(payer.publicKey, i); - - // Create the instruction to "mint" the compressed NFT to the tree - const mintIx = createMintToCollectionV1Instruction( - { - payer: payer.publicKey, // The account that will pay for the transaction - merkleTree: treeAddress, // The address of the tree account - treeAuthority, // The authority of the tree account, should be a PDA derived from the tree account address - treeDelegate: payer.publicKey, // The delegate of the tree account, should be the same as the tree creator by default - leafOwner: payer.publicKey, // The owner of the compressed NFT being minted to the tree - leafDelegate: payer.publicKey, // The delegate of the compressed NFT being minted to the tree - collectionAuthority: payer.publicKey, // The 
authority of the "collection" NFT - collectionAuthorityRecordPda: BUBBLEGUM_PROGRAM_ID, // Must be the Bubblegum program id - collectionMint: collectionDetails.mint, // The mint of the "collection" NFT - collectionMetadata: collectionDetails.metadata, // The metadata of the "collection" NFT - editionAccount: collectionDetails.masterEditionAccount, // The master edition of the "collection" NFT - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - logWrapper: SPL_NOOP_PROGRAM_ID, - bubblegumSigner, - tokenMetadataProgram: TOKEN_METADATA_PROGRAM_ID, - }, - { - metadataArgs: Object.assign(compressedNFTMetadata, { - collection: { key: collectionDetails.mint, verified: false }, - }), - }, - ); - - try { - // Create new transaction and add the instruction - const tx = new Transaction().add(mintIx); - - // Set the fee payer for the transaction - tx.feePayer = payer.publicKey; - - // Send the transaction - const txSignature = await sendAndConfirmTransaction( - connection, - tx, - [payer], - { commitment: "confirmed", skipPreflight: true }, - ); - - console.log( - `https://explorer.solana.com/tx/${txSignature}?cluster=devnet`, - ); - } catch (err) { - console.error("\nFailed to mint compressed NFT:", err); - throw err; +```typescript filename="mint-compressed-nft-to-collection.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { + findLeafAssetIdPda, + LeafSchema, + mintToCollectionV1, + mplBubblegum, + parseLeafFromMintToCollectionV1Transaction, +} from "@metaplex-foundation/mpl-bubblegum"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; +import { clusterApiUrl } from "@solana/web3.js"; + +const umi = createUmi(clusterApiUrl("devnet")); + +// load keypair from local file system +// See 
https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); +``` + +I am going to be +[recycling a Collection NFT](https://explorer.solana.com/address/D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1?cluster=devnet) +I already created in the NFTs with Metaplex lesson, but if you'd like to create +a new collection for this lesson, check out the code +[on this repo](https://github.com/solana-developers/professional-education/blob/main/labs/metaplex-umi/create-collection.ts) + + +Find the code to create a Metaplex Collection NFT in our [NFTs with Metaplex lesson](https://solana.com/developers/courses/tokens-and-nfts/nfts-with-metaplex#add-the-nft-to-a-collection). + + +To mint a compressed NFT to a collection we will need + +- `leafOwner` - The recipient of the compressed NFT + +- `merkleTree` - The Merkle tree address we created in the previous step + +- `collection` - The collection our cNFT will belong to. This is not required, + and you can leave it out if your cNFT doesn't belong to a collection. + +- `metadata` - Your offchain metadata. This lesson won't focus onto how to + prepare your metadata, but you can check out the + [recommended structure from Metaplex](https://developers.metaplex.com/token-metadata/token-standard#the-non-fungible-standard). + +Our cNFT will use this structure we already prepared earlier. 
+ +```json filename="nft.json" +{ + "name": "My NFT", + "symbol": "MN", + "description": "My NFT Description", + "image": "https://lycozm33rkk5ozjqldiuzc6drazmdp5d5g3g7foh3gz6rz5zp7va.arweave.net/XgTss3uKlddlMFjRTIvDiDLBv6Pptm-Vx9mz6Oe5f-o", + "attributes": [ + { + "trait_type": "Background", + "value": "transparent" + }, + { + "trait_type": "Shape", + "value": "sphere" } - } + ] } ``` -This is a great point to test with a small tree. Simply update `main` to call -`getOrCreateCollectionNFT` then `mintCompressedNftToCollection`: +Putting it all into code, we will have -```typescript -async function main() { - const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - const wallet = await getOrCreateKeypair("Wallet_1"); - await airdropSolIfNeeded(wallet.publicKey); - - const maxDepthSizePair: ValidDepthSizePair = { - maxDepth: 3, - maxBufferSize: 8, - }; - - const canopyDepth = 0; - - const treeAddress = await createAndInitializeTree( - connection, - wallet, - maxDepthSizePair, - canopyDepth, - ); - - const collectionNft = await getOrCreateCollectionNFT(connection, wallet); - - await mintCompressedNftToCollection( - connection, - wallet, - treeAddress, - collectionNft, - 2 ** maxDepthSizePair.maxDepth, - ); -} +```typescript filename="mint-compressed-nft-to-collection.ts" +const merkleTree = UMIPublicKey("ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h"); + +const collectionMint = UMIPublicKey( + "D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1", +); + +const uintSig = await( + await mintToCollectionV1(umi, { + leafOwner: umi.identity.publicKey, + merkleTree, + collectionMint, + metadata: { + name: "My NFT", + uri: "https://chocolate-wet-narwhal-846.mypinata.cloud/ipfs/QmeBRVEmASS3pyK9YZDkRUtAham74JBUZQE3WD4u4Hibv9", + sellerFeeBasisPoints: 0, // 0% + collection: { key: collectionMint, verified: false }, + creators: [ + { + address: umi.identity.publicKey, + verified: false, + share: 100, + }, + ], + }, + }).sendAndConfirm(umi), +).signature; + +const b64Sig 
= base58.deserialize(uintSig); +console.log(b64Sig); ``` -Again, to run, in your terminal type: `npm run start` +The difference between the first statement is that we are returning the byte +array representing the transaction signature. + +We need this has in order to be able to get the leaf schema and with this schema +derive the asset ID. -#### 5. Read existing cNFT data +```typescript filename="mint-compressed-nft-to-collection.ts" +const leaf: LeafSchema = await parseLeafFromMintToCollectionV1Transaction( + umi, + uintSig, +); +const assetId = findLeafAssetIdPda(umi, { + merkleTree, + leafIndex: leaf.nonce, +})[0]; +``` -Now that we've written code to mint cNFTs, let's see if we can actually fetch -their data. This is tricky because the onchain data is just the Merkle tree -account, the data from which can be used to verify existing information as -accurate but is useless in conveying what the information is. +With everything in place, we can now run our script +`mint-compressed-nft-to-collection.ts` -Let's start by declaring a function `logNftDetails` that takes as parameters -`treeAddress` and `nftsMinted`. +```bash +npx esrun mint-compressed-nft-to-collection.ts +``` + +Your output should resemble + +```bash +asset id: D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS +✅ Finished successfully! +``` + +We aren't returning the Explorer link because this address won't exists on the +Solana state but is indexed by RPCs that support the DAS API. -At this point we don't actually have a direct identifier of any kind that points -to our cNFT. To get that, we'll need to know the leaf index that was used when -we minted our cNFT. We can then use that to derive the asset ID used by the Read -API and subsequently use the Read API to fetch our cNFT data. +In the next step we will query this address to fetch out cNFT details. -In our case, we created a non-public tree and minted 8 cNFTs, so we know that -the leaf indexes used were 0-7. 
With this, we can use the `getLeafAssetId` -function from `@metaplex-foundation/mpl-bubblegum` to get the asset ID. +#### 4. Read existing cNFT data -Finally, we can use an RPC that supports the -[Read API](https://solana.com/developers/guides/javascript/compressed-nfts) to -fetch the asset. We'll be using -[Helius](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api), -but feel free to choose your own RPC provider. To use Helius, you'll need to get -a free API Key from [the Helius website](https://dev.helius.xyz/). Then add your -`RPC_URL` to your `.env` file. For example: +Now that we’ve written code to mint cNFTs, let’s see if we can actually fetch +their data. + +Create a new file `fetch-cnft-details.ts` ```bash -## Add this -RPC_URL=https://devnet.helius-rpc.com/?api-key=YOUR_API_KEY +fetch-cnft-details.ts ``` -Then simply issue a POST request to your provided RPC URL and put the `getAsset` -information in the body: +Import our packages and instantiate Umi. Here we will finally make use of the +`umi.use(dasApi())` we've been importing. -```typescript -async function logNftDetails(treeAddress: PublicKey, nftsMinted: number) { - for (let i = 0; i < nftsMinted; i++) { - const assetId = await getLeafAssetId(treeAddress, new BN(i)); - console.log("Asset ID:", assetId.toBase58()); - const response = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAsset", - params: { - id: assetId, - }, - }), - }); - const { result } = await response.json(); - console.log(JSON.stringify(result, null, 2)); - } -} +In the instantiation of Umi, we are going to make a change to our connection +endpoint and use an RPC that supports the DAS API. 
+ +Be sure to update this with your Helius API keys which you can get from the +[developer dashboard page](https://dashboard.helius.dev/signup?redirectTo=onboarding) + +```typescript filename="fetch-cnft-details.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { mplBubblegum } from "@metaplex-foundation/mpl-bubblegum"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { getKeypairFromFile } from "@solana-developers/helpers"; + +const umi = createUmi( + "https://devnet.helius-rpc.com/?api-key=YOUR-HELIUS-API-KEY", +); + +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); + +// convert to Umi compatible keypair +const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey); + +// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance +umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi()); ``` -Helius essentially observes transaction logs as they happen and stores the NFT -metadata that was hashed and stored in the Merkle tree. This enables them to -surface that data when requested. +Fetching a compressed NFT details is as simple as calling the `getAsset` method +with the `assetId` from the previous step. + +```typescript filename="fetch-cnft-details.ts" +const assetId = UMIPublicKey("D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS"); -If we add a call to this function at the end of `main` and re-run your script, -the data we get back in the console is very comprehensive. It includes all of -the data you'd expect in both the onchain and offchain portion of a traditional -NFT. You can find the cNFT's attributes, files, ownership and creator -information, and more. 
+// @ts-ignore +const rpcAsset = await umi.rpc.getAsset(assetId); +console.log(rpcAsset); +``` + +Let’s start by declaring a function `logNftDetails` that takes as parameters +`treeAddress` and `nftsMinted`. + +The output of our console.log would output ```json { - "interface": "V1_NFT", - "id": "48Bw561h1fGFK4JGPXnmksHp2fpniEL7hefEc6uLZPWN", - "content": { - "$schema": "https://schema.metaplex.com/nft1.0.json", - "json_uri": "https://raw.githubusercontent.com/Unboxed-Software/rgb-png-generator/master/assets/183_89_78/183_89_78.json", - "files": [ - { - "uri": "https://raw.githubusercontent.com/Unboxed-Software/rgb-png-generator/master/assets/183_89_78/183_89_78.png", - "cdn_uri": "https://cdn.helius-rpc.com/cdn-cgi/image//https://raw.githubusercontent.com/Unboxed-Software/rgb-png-generator/master/assets/183_89_78/183_89_78.png", - "mime": "image/png" - } - ], - "metadata": { - "attributes": [ - { - "value": "183", - "trait_type": "R" - }, - { - "value": "89", - "trait_type": "G" - }, - { - "value": "78", - "trait_type": "B" - } - ], - "description": "Random RGB Color", - "name": "CNFT", - "symbol": "CNFT" + interface: 'V1_NFT', + id: 'D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS', + content: { + '$schema': 'https://schema.metaplex.com/nft1.0.json', + json_uri: 'https://chocolate-wet-narwhal-846.mypinata.cloud/ipfs/QmeBRVEmASS3pyK9YZDkRUtAham74JBUZQE3WD4u4Hibv9', + files: [ [Object] ], + metadata: { + attributes: [Array], + description: 'My NFT Description', + name: 'My NFT', + symbol: '', + token_standard: 'NonFungible' }, - "links": { - "image": "https://raw.githubusercontent.com/Unboxed-Software/rgb-png-generator/master/assets/183_89_78/183_89_78.png" + links: { + image: 'https://lycozm33rkk5ozjqldiuzc6drazmdp5d5g3g7foh3gz6rz5zp7va.arweave.net/XgTss3uKlddlMFjRTIvDiDLBv6Pptm-Vx9mz6Oe5f-o' } }, - "authorities": [ + authorities: [ { - "address": "DeogHav5T2UV1zf5XuH4DTwwE5fZZt7Z4evytUUtDtHd", - "scopes": ["full"] + address: 
'4sk8Ds1T4bYnN4j23sMbVyHYABBXQ53NoyzVrXGd3ja4', + scopes: [Array] } ], - "compression": { - "eligible": false, - "compressed": true, - "data_hash": "3RsXHMBDpUPojPLZuMyKgZ1kbhW81YSY3PYmPZhbAx8K", - "creator_hash": "Di6ufEixhht76sxutC9528H7PaWuPz9hqTaCiQxoFdr", - "asset_hash": "2TwWjQPdGc5oVripPRCazGBpAyC5Ar1cia8YKUERDepE", - "tree": "7Ge8nhDv2FcmnpyfvuWPnawxquS6gSidum38oq91Q7vE", - "seq": 8, - "leaf_id": 7 + compression: { + eligible: false, + compressed: true, + data_hash: '2UgKwnTkguefRg3P5J33UPkNebunNMFLZTuqvnBErqhr', + creator_hash: '4zKvSQgcRhJFqjQTeCjxuGjWydmWTBVfCB5eK4YkRTfm', + asset_hash: '2DwKkMFYJHDSgTECiycuBApMt65f3N1ZwEbRugRZymwJ', + tree: 'ZwzNxXw83PUmWSypXmqRH669gD3hF9rEjHWPpVghr5h', + seq: 4, + leaf_id: 3 }, - "grouping": [ + grouping: [ { - "group_key": "collection", - "group_value": "9p2RqBUAadMznAFiBEawMJnKR9EkFV98wKgwAz8nxLmj" + group_key: 'collection', + group_value: 'D2zi1QQmtZR5fk7wpA1Fmf6hTY2xy8xVMyNgfq6LsKy1' } ], - "royalty": { - "royalty_model": "creators", - "target": null, - "percent": 0, - "basis_points": 0, - "primary_sale_happened": false, - "locked": false + royalty: { + royalty_model: 'creators', + target: null, + percent: 0, + basis_points: 0, + primary_sale_happened: false, + locked: false }, - "creators": [ + creators: [ { - "address": "HASk3AoTPAvC1KnXSo6Qm73zpkEtEhbmjLpXLgvyKBkR", - "share": 100, - "verified": false + address: '4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF', + share: 100, + verified: false } ], - "ownership": { - "frozen": false, - "delegated": false, - "delegate": null, - "ownership_model": "single", - "owner": "HASk3AoTPAvC1KnXSo6Qm73zpkEtEhbmjLpXLgvyKBkR" - }, - "supply": { - "print_max_supply": 0, - "print_current_supply": 0, - "edition_nonce": 0 + ownership: { + frozen: false, + delegated: false, + delegate: null, + ownership_model: 'single', + owner: '4kg8oh3jdNtn7j2wcS7TrUua31AgbLzDVkBZgTAe44aF' }, - "mutable": false, - "burnt": false + supply: { print_max_supply: 0, print_current_supply: 0, 
edition_nonce: null },
+  mutable: true,
+  burnt: false
 }
 ```
 
@@ -1362,274 +1035,85 @@ owner, creator, etc., and more. Be sure to look through the
 [Helius docs](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api)
 to see what's available.
 
-#### 6. Transfer a cNFT
+#### 5. Transfer a cNFT
 
 The last thing we're going to add to our script is a cNFT transfer. Just as with
 a standard SPL token transfer, security is paramount. Unlike with a standard SPL
 token transfer, however, to build a secure transfer with state compression of
 any kind, the program performing the transfer needs the entire asset data.
 
-The program, Bubblegum in this case, needs to be provided with the entire data
-that was hashed and stored on the corresponding leaf _and_ needs to be given the
-“proof path” for the leaf in question. That makes cNFT transfers a bit trickier
-than SPL token transfers.
+Fortunately for us, we can get the asset data with the `getAssetWithProof` method.
 
-Remember, the general steps are:
+Let's first create a new file `transfer-asset.ts`, and populate it with the code
+for instantiating a new Umi client.
 
-1. Fetch the cNFT's asset data from the indexer
-2. Fetch the cNFT's proof from the indexer
-3. Fetch the Merkle tree account from the Solana blockchain
-4. Prepare the asset proof as a list of `AccountMeta` objects
-5. 
Build and send the Bubblegum transfer instruction +```typescript filename="transfer-asset.ts" +import { dasApi } from "@metaplex-foundation/digital-asset-standard-api"; +import { + getAssetWithProof, + mplBubblegum, + transfer, +} from "@metaplex-foundation/mpl-bubblegum"; +import { + keypairIdentity, + publicKey as UMIPublicKey, +} from "@metaplex-foundation/umi"; +import { createUmi } from "@metaplex-foundation/umi-bundle-defaults"; +import { base58 } from "@metaplex-foundation/umi/serializers"; +import { + getExplorerLink, + getKeypairFromFile, +} from "@solana-developers/helpers"; +import { clusterApiUrl } from "@solana/web3.js"; -Let's start by declaring a `transferNft` function that takes the following: +const umi = createUmi(clusterApiUrl("devnet")); -- `connection` - a `Connection` object -- `assetId` - a `PublicKey` object -- `sender` - a `Keypair` object so we can sign the transaction -- `receiver` - a `PublicKey` object representing the new owner +// load keypair from local file system +// See https://github.com/solana-developers/helpers?tab=readme-ov-file#get-a-keypair-from-a-keypair-file +const localKeypair = await getKeypairFromFile(); -Inside that function, let's fetch the asset data again then also fetch the asset -proof. For good measure, let's wrap everything in a `try catch`. 
+
+// convert to Umi compatible keypair
+const umiKeypair = umi.eddsa.createKeypairFromSecretKey(localKeypair.secretKey);
 
-```typescript
-async function transferNft(
-  connection: Connection,
-  assetId: PublicKey,
-  sender: Keypair,
-  receiver: PublicKey,
-) {
-  try {
-    const assetDataResponse = await fetch(process.env.RPC_URL, {
-      method: "POST",
-      headers: { "Content-Type": "application/json" },
-      body: JSON.stringify({
-        jsonrpc: "2.0",
-        id: "my-id",
-        method: "getAsset",
-        params: {
-          id: assetId,
-        },
-      }),
-    });
-    const assetData = (await assetDataResponse.json()).result;
-
-    const assetProofResponse = await fetch(process.env.RPC_URL, {
-      method: "POST",
-      headers: { "Content-Type": "application/json" },
-      body: JSON.stringify({
-        jsonrpc: "2.0",
-        id: "my-id",
-        method: "getAssetProof",
-        params: {
-          id: assetId,
-        },
-      }),
-    });
-    const assetProof = (await assetProofResponse.json()).result;
-  } catch (err: any) {
-    console.error("\nFailed to transfer nft:", err);
-    throw err;
-  }
-}
+// load the MPL Bubblegum program, dasApi plugin and assign a signer to our umi instance
+umi.use(keypairIdentity(umiKeypair)).use(mplBubblegum()).use(dasApi());
 ```
 
-Next, let's fetch the Merkle tree account from the chain, get the canopy depth,
-and assemble the proof path. We do this by mapping the asset proof we got from
-Helius to a list of `AccountMeta` objects, then removing any proof nodes at the
-end that are already cached onchain in the canopy.
+We are now ready to transfer our asset. Using the `assetId` for our cNFT, we can
+call the `transfer` method from the Bubblegum library
 
-```typescript
-async function transferNft(
-  connection: Connection,
-  assetId: PublicKey,
-  sender: Keypair,
-  receiver: PublicKey
-) {
-  try {
-    ... 
- - const treePublicKey = new PublicKey(assetData.compression.tree) - - const treeAccount = await ConcurrentMerkleTreeAccount.fromAccountAddress( - connection, - treePublicKey - ) - - const canopyDepth = treeAccount.getCanopyDepth() || 0 - - const proofPath: AccountMeta[] = assetProof.proof - .map((node: string) => ({ - pubkey: new PublicKey(node), - isSigner: false, - isWritable: false, - })) - .slice(0, assetProof.proof.length - canopyDepth) - } catch (err: any) { - console.error("\nFailed to transfer nft:", err) - throw err - } -} -``` +```typescript filename="transfer-asset.ts" +const assetId = UMIPublicKey("D4A8TYkKE5NzkqBQ4mPybgFbAUDN53fwJ64b8HwEEuUS"); -Finally, we build the instruction using `createTransferInstruction`, add it to a -transaction, then sign and send the transaction. This is what the entire -`transferNft` function looks like when finished: +//@ts-ignore +const assetWithProof = await getAssetWithProof(umi, assetId); -```typescript -async function transferNft( - connection: Connection, - assetId: PublicKey, - sender: Keypair, - receiver: PublicKey, -) { - try { - const assetDataResponse = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAsset", - params: { - id: assetId, - }, - }), - }); - const assetData = (await assetDataResponse.json()).result; - - const assetProofResponse = await fetch(process.env.RPC_URL, { - method: "POST", - headers: { "Content-Type": "application/json" }, - body: JSON.stringify({ - jsonrpc: "2.0", - id: "my-id", - method: "getAssetProof", - params: { - id: assetId, - }, - }), - }); - const assetProof = (await assetProofResponse.json()).result; - - const treePublicKey = new PublicKey(assetData.compression.tree); - - const treeAccount = await ConcurrentMerkleTreeAccount.fromAccountAddress( - connection, - treePublicKey, - ); - - const canopyDepth = treeAccount.getCanopyDepth() || 0; - - const 
proofPath: AccountMeta[] = assetProof.proof - .map((node: string) => ({ - pubkey: new PublicKey(node), - isSigner: false, - isWritable: false, - })) - .slice(0, assetProof.proof.length - canopyDepth); - - const treeAuthority = treeAccount.getAuthority(); - const leafOwner = new PublicKey(assetData.ownership.owner); - const leafDelegate = assetData.ownership.delegate - ? new PublicKey(assetData.ownership.delegate) - : leafOwner; - - const transferIx = createTransferInstruction( - { - merkleTree: treePublicKey, - treeAuthority, - leafOwner, - leafDelegate, - newLeafOwner: receiver, - logWrapper: SPL_NOOP_PROGRAM_ID, - compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, - anchorRemainingAccounts: proofPath, - }, - { - root: [...new PublicKey(assetProof.root.trim()).toBytes()], - dataHash: [ - ...new PublicKey(assetData.compression.data_hash.trim()).toBytes(), - ], - creatorHash: [ - ...new PublicKey(assetData.compression.creator_hash.trim()).toBytes(), - ], - nonce: assetData.compression.leaf_id, - index: assetData.compression.leaf_id, - }, - ); - - const tx = new Transaction().add(transferIx); - tx.feePayer = sender.publicKey; - const txSignature = await sendAndConfirmTransaction( - connection, - tx, - [sender], - { - commitment: "confirmed", - skipPreflight: true, - }, - ); - console.log(`https://explorer.solana.com/tx/${txSignature}?cluster=devnet`); - } catch (err: any) { - console.error("\nFailed to transfer nft:", err); - throw err; - } -} +let uintSig = await( + await transfer(umi, { + ...assetWithProof, + leafOwner: umi.identity.publicKey, + newLeafOwner: UMIPublicKey("J63YroB8AwjDVjKuxjcYFKypVM3aBeQrfrVmNBxfmThB"), + }).sendAndConfirm(umi), +).signature; + +const b64sig = base58.deserialize(uintSig); + +let explorerLink = getExplorerLink("transaction", b64sig, "devnet"); +console.log(`Explorer link: ${explorerLink}`); +console.log("✅ Finished successfully!"); ``` -Lets transfer our first compressed NFT at index 0 to someone else. 
First we'll -need to spin up another wallet with some funds, then grab the assetID at index 0 -using `getLeafAssetId`. Then we'll do the transfer. Finally, we'll print out the -entire collection using our function `logNftDetails`. You'll note that the NFT -at index zero will now belong to our new wallet in the `ownership` field. +Running our script with `npx esrun transfer-asset.ts`, should output something +similar to this if successful: -```typescript -async function main() { - const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - const wallet = await getOrCreateKeypair("Wallet_1"); - await airdropSolIfNeeded(wallet.publicKey); - - const maxDepthSizePair: ValidDepthSizePair = { - maxDepth: 3, - maxBufferSize: 8, - }; - - const canopyDepth = 0; - - const treeAddress = await createAndInitializeTree( - connection, - wallet, - maxDepthSizePair, - canopyDepth, - ); - - const collectionNft = await getOrCreateCollectionNFT(connection, wallet); - - await mintCompressedNftToCollection( - connection, - wallet, - treeAddress, - collectionNft, - 2 ** maxDepthSizePair.maxDepth, - ); - - const recieverWallet = await getOrCreateKeypair("Wallet_2"); - const assetId = await getLeafAssetId(treeAddress, new BN(0)); - await airdropSolIfNeeded(recieverWallet.publicKey); - - console.log( - `Transfering ${assetId.toString()} from ${wallet.publicKey.toString()} to ${recieverWallet.publicKey.toString()}`, - ); - - await transferNft(connection, assetId, wallet, recieverWallet.publicKey); - - await logNftDetails(treeAddress, 8); -} +```bash +Explorer link: https://explorer.solana.com/tx/3sNgN7Gnh5FqcJ7ZuUEXFDw5WeojpwkDjdfvTNWy68YCEJUF8frpnUJdHhHFXAtoopsytzkKewh39Rf7phFQ2hCF?cluster=devnet +✅ Finished successfully! ``` -Go ahead and run your script. The whole thing should execute without failing, -and all for close to 0.01 SOL! 
+
+Open the explorer link, and scroll to the bottom to observe your transaction logs.
+
+![Solana Explorer showing logs of the transfer cnft instruction](/public/assets/courses/unboxed/solana-explorer-showing-cnft-transfer-logs.png)
 
 Congratulations! Now you know how to mint, read, and transfer cNFTs. If you
 wanted, you could update the max depth, max buffer size, and canopy depth to
@@ -1637,12 +1121,10 @@ larger values and as long as you have enough Devnet SOL, this script will let
 you mint up to 10k cNFTs for a small fraction of what it would cost to mint 10k
 traditional NFTs.
 
-If you plan to mint a large amount of NFTs you might want
-to try and batch these instructions for fewer total transactions.
-
-If you need more time with this lab, feel free to go through it again and/or
-take a look at the solution code on the `solution` branch of the
-[lab repo](https://github.com/Unboxed-Software/solana-cnft-demo/tree/solution).
+Inspect the cNFT on Solana Explorer! Just like previously, if you have any
+issues, you should fix them yourself, but if needed the
+[solution code](https://github.com/solana-foundation/compressed-nfts) is
+available.
 
 ### Challenge
 
diff --git a/content/courses/state-compression/generalized-state-compression.md b/content/courses/state-compression/generalized-state-compression.md
index f91169e38..6a74e7c70 100644
--- a/content/courses/state-compression/generalized-state-compression.md
+++ b/content/courses/state-compression/generalized-state-compression.md
@@ -1,309 +1,374 @@
 ---
-title: Generalized State Compression
+title: Generalized State Compression
+
 objectives:
-  - Explain the logic flow behind Solana state compression
+  - Explain the flow of Solana’s state compression logic.
- Explain the difference between a Merkle tree and a concurrent Merkle tree - - Implement generic state compression in basic Solana programs + - Implement generic state compression in a basic Solana program + description: - "How state compression - the tech behind compressed NFTs - works, and how to - implement it in your own Solana programs." + Understand how state compression - the technology behind compressed NFTs works + - and learn how to apply it in your Solana programs. --- ## Summary -- State Compression on Solana is most commonly used for compressed NFTs, but - it's possible to use it for arbitrary data -- State Compression lowers the amount of data you have to store onchain by - leveraging Merkle trees. -- Merkle trees store a single hash that represents an entire binary tree of - hashes. Each leaf on a Merkle tree is a hash of that leaf's data. -- Concurrent Merkle trees are a specialized version of Merkle trees that allow - concurrent updates. -- Because data in a state-compressed program is not stored onchain, you have to - user indexers to keep an offchain cache of the data and then verify that data - against the onchain Merkle tree. +- State compression on Solana is primarily used for compressed NFTs (cNFTs), but + it can be applied to any data type +- State Compression lowers the amount of data you have to store onchain using + Merkle trees. +- A Merkle tree compresses data by hashing pairs of data repeatedly until a + single root hash is produced. This root hash is then stored onchain. +- Each leaf on a Merkle tree is a hash of that leaf’s data. +- A concurrent Merkle tree is a specialized version of a Merkle tree. Unlike a + standard Merkle tree, it allows multiple updates simultaneously without + affecting transaction validity. +- Data in a state-compressed program is not stored onchain. So you have to use + indexers to keep an offchain cache of the data. It’s this offchain cache data + that is used to then verify against the onchain Merkle tree. 
## Lesson -Previously, we discussed state compression in the context of compressed NFTs. At -the time of writing, compressed NFTs represent the most common use case for -state compression, but it's possible to use state compression within any -program. In this lesson, we'll discuss state compression in more generalized -terms so that you can apply it to any of your programs. +Previously, we talked about state compression in the context of compressed NFTs. + +While compressed NFTs are the main use case for state compression, you can apply +state compression to any Solana program. In this lesson, we’ll discuss state +compression in general terms so you can use it across your Solana projects. ### A theoretical overview of state compression -In traditional programs, data is serialized (typically using borsh) and then -stored directly in an account. This allows the data to be easily read and -written through Solana programs. You can “trust” the data stored in the accounts -because it can't be modified except through the mechanisms surfaced by the -program. - -State compression effectively asserts that the most important piece of this -equation is how “trustworthy” the data is. If all we care about is the ability -to trust that data is what it claims to be, then we can actually get away with -**_not_** storing the data in an account onchain. Instead, we can store hashes -of the data where the hashes can be used to prove or verify the data. The data -hash takes up significantly less storage space than the data itself. We can then -store the actual data somewhere much cheaper and worry about verifying it -against the onchain hash when the data is accessed. - -The specific data structure used by the Solana State Compression program is a -special binary tree structure known as a **concurrent Merkle tree**. This tree -structure hashes pieces of data together in a deterministic way to compute a -single, final hash that gets stored onchain. 
This final hash is significantly
-smaller in size than all the original data combined, hence the “compression.”
-The steps to this process are:
-
-1. Take any piece of data
-2. Create a hash of this data
-3. Store this hash as a “leaf” at the bottom of the tree
-4. Each leaf pair is then hashed together, creating a “branch”
-5. Each branch is then hashed together
-6. Continually climb the tree and hash adjacent branches together
-7. Once at the top of the tree, a final ”root hash” is produced
-8. Store the root hash onchain as verifiable proof of the data within each leaf
-9. Anyone wanting to verify that the data they have matches the “source of
-   truth” can go through the same process and compare the final hash without
-   having to store all the data onchain
-
-This involves a few rather serious development tradeoffs:
-
-1. Since the data is no longer stored in an account onchain, it is more
-   difficult to access.
-2. Once the data has been accessed, developers must decide how often their
-   applications will verify the data against the onchain hash.
-3. Any changes to the data will require sending the entirety of the previously
-   hashed data _and_ the new data into an instruction. Developer may also have
-   to provide additional data relevant to the proofs required to verify the
-   original data against the hash.
-
-Each of these will be a consideration when determining **if**, **when**, and
-**how** to implement state compression for your program.
+Normally, data in Solana programs is serialized (usually with borsh) and stored
+directly in an account. This makes it easy to read and write the data through
+the program. The account data is trustworthy because only the program can modify
+it.
+
+However, if we only need to verify the integrity of the data, there’s no need to store the
+actual data onchain. Instead, we can store hashes of the data, which can be used
+to prove or verify its accuracy. This is called _state compression_. 
+
+These hashes take up far less storage space than the original data. The full
+data can be stored in a cheaper, offchain location, and only needs to be
+verified against the onchain hash when accessed.
+
+The Solana State Compression program uses a data structure known as a **concurrent
+Merkle tree**. A concurrent Merkle tree is a special kind of binary tree that
+deterministically hashes data, i.e. the same inputs will always produce the same
+Merkle root.
+
+The final hash, called a _Merkle root_, is significantly smaller in size than
+all the original full data sets combined. This is why it’s called "compression".
+And it’s this hash that’s stored onchain.
+
+**Outlined below are the steps to this process, in order:**
+
+1. Take a piece of data.
+2. Create a hash of that data.
+3. Store the hash as a "leaf" at the bottom of the tree.
+4. Hash pairs of leaves together to create branches.
+5. Hash pairs of branches together.
+6. Repeat this process until you reach the top of the tree.
+7. The top of the tree contains a final "root hash."
+8. Store this root hash onchain as proof of the data.
+9. To verify the data, recompute the hashes and compare the final hash to the
+   onchain root hash.
+
+This method comes with some trade-offs:
+
+1. The data isn’t stored onchain, so it’s harder to access.
+2. Developers must decide how often to verify the data against the onchain hash.
+3. If the data changes, the entire data set must be sent to the program, along
+   with the new data. You’ll also need proof that the data matches the hash.
+
+These considerations will guide you when deciding whether, when, and how to
+implement state compression in your programs. With that quick overview, let’s go
+into more technical detail.
 
 #### Concurrent Merkle trees
 
-A **Merkle tree** is a binary tree structure represented by a single hash. Every
-leaf node in the structure is a hash of its inner data while every branch is a
-hash of its child leaf hashes. 
In turn, branches are also hashed together until, -eventually, one final root hash remains. - -Since the Merkle tree is represented as a single hash, any modification to leaf -data changes the root hash. This causes an issue when multiple transactions in -the same slot are attempting to modify leaf data. Since these transactions must -execute in series, all but the first will fail since the root hash and proof -passed in will have been invalidated by the first transaction to be executed. In -other words, a standard Merkle tree can only modify a single leaf per slot. In a -hypothetical state-compressed program that relies on a single Merkle tree for -its state, this severely limits throughput. - -This can be solved with a **concurrent Merkle tree**. A concurrent Merkle -tree is a Merkle tree that stores a secure changelog of the most recent changes -along with their root hash and the proof to derive it. When multiple -transactions in the same slot try to modify leaf data, the changelog can be used -as a source of truth to allow for concurrent changes to be made to the tree. - -In other words, while an account storing a Merkle tree would have only the root -hash, a concurrent Merkle tree will also contain additional data that allows -subsequent writes to successfully occur. This includes: - -1. The root hash - The same root hash that a standard Merkle tree has. -2. A changelog buffer - This buffer contains proof data pertinent to recent root - hash changes so that subsequent writes in the same slot can still be - successful. -3. A canopy - When performing an update action on any given leaf, you need the - entire proof path from that leaf to the root hash. The canopy stores - intermediate proof nodes along that path so they don't all have to be passed - into the program from the client. - -As a program architect, you control three values directly related to these three -items. 
Your choice determines the size of the tree, the cost to create the tree, -and the number of concurrent changes that can be made to the tree: - -1. Max depth -2. Max buffer size -3. Canopy depth - -The **max depth** is the maximum number of hops to get from any leaf to the root -of the tree. Since Merkle trees are binary trees, every leaf is connected only -to one other leaf. Max depth can then logically be used to calculate the number -of nodes for the tree with `2 ^ maxDepth`. - -The **max buffer size** is effectively the maximum number of concurrent changes -that you can make to a tree within a single slot with the root hash still being -valid. When multiple transactions are submitted in the same slot, each of which -is competing to update leafs on a standard Merkle tree, only the first to run -will be valid. This is because that “write” operation will modify the hash -stored in the account. Subsequent transactions in the same slot will be trying -to validate their data against a now-outdated hash. A concurrent Merkle tree has -a buffer so that the buffer can keep a running log of these modifications. This -allows the State Compression Program to validate multiple data writes in the -same slot because it can look up what the previous hashes were in the buffer and -compare against the appropriate hash. - -The **canopy depth** is the number of proof nodes that are stored onchain for -any given proof path. Verifying any leaf requires the complete proof path for -the tree. The complete proof path is made up of one proof node for every “layer” -of the tree, i.e. a max depth of 14 means there are 14 proof nodes. Every proof -node passed into the program adds 32 bytes to a transaction, so large trees -would quickly exceed the maximum transaction size limit. Caching proof nodes -onchain in the canopy helps improve program composability. - -Each of these three values, max depth, max buffer size, and canopy depth, comes -with a tradeoff. 
Increasing the value of any of these values increases the size -of the account used to store the tree, thus increasing the cost of creating the -tree. - -Choosing the max depth is fairly straightforward as it directly relates to the -number of leafs and therefore the amount of data you can store. If you need 1 -million cNFTs on a single tree where each cNFT is a leaf of the tree, find the -max depth that makes the following expression true: `2^maxDepth > 1 million`. -The answer is 20. - -Choosing a max buffer size is effectively a question of throughput: how many -concurrent writes do you need? The larger the buffer, the higher the throughput. - -Lastly, the canopy depth will determine your program's composability. State -compression pioneers have made it clear that omitting a canopy is a bad idea. -Program A can't call your state-compressed program B if doing so maxes out the -transaction size limits. Remember, program A also has required accounts and data -in addition to required proof paths, each of which take up transaction space. - -#### Data access on a state-compressed program - -A state-compressed account doesn't store the data itself. Rather, it stores the -concurrent Merkle tree structure discussed above. The raw data itself lives only -in the blockchain's cheaper **ledger state.** This makes data access somewhat -more difficult, but not impossible. - -The Solana ledger is a list of entries containing signed transactions. In -theory, this can be traced back to the genesis block. This effectively means any -data that has ever been put into a transaction exists in the ledger. - -Since the state compression hashing process occurs onchain, all the data exists -in the ledger state and could theoretically be retrieved from the original -transaction by replaying the entire chain state from the beginning. However, -it's much more straightforward (though still complicated) to have -an **indexer** track and index this data as the transactions occur. 
This ensures -there is an offchain “cache” of the data that anyone can access and subsequently -verify against the onchain root hash. - -This process is complex, but it will make sense after some practice. - -### State compression tooling - -The theory described above is essential to properly understanding state -compression. But you don't have to implement any of it from scratch. Brilliant -engineers have laid most of the groundwork for you in the form of the SPL State -Compression Program and the Noop Program. +Since a Merkle tree is represented as a single hash, any change to a leaf node +alters the root hash. This becomes problematic when multiple transactions in the +same slot try to update leaf data in the same slot. Since transactions are +executed serially i.e. one after the other — all but the first will fail since +the root hash and proof passed in will have been invalidated by the first +transaction executed. + +In short, a standard Merkle tree can only handle one leaf update per +[slot](https://solana.com/docs/terminology#slot). This significantly limits the +throughput in a state-compressed program that depends on a single Merkle tree +for its state. + +Thankfully, this issue can be addressed using a _concurrent_ Merkle tree. Unlike +a regular Merkle tree, a concurrent Merkle tree keeps a secure changelog of +recent updates, along with their root hash and the proof needed to derive it. +When multiple transactions in the same slot attempt to modify leaf data, the +changelog serves as a reference, enabling concurrent updates to the tree. + +How does the concurrent Merkle tree achieve this? In a standard Merkle tree, +only the root hash is stored. However, a concurrent Merkle tree includes extra +data that ensures subsequent writes can succeed. + +This includes: + +1. The root hash - The same root hash found in a regular Merkle tree. +2. 
A changelog buffer - A buffer containing proof data for recent root hash + changes, allowing further writes in the same slot to succeed. +3. A canopy - To update a specific leaf, you need the entire proof path from the + leaf to the root hash. The canopy stores intermediate proof nodes along this + path so that not all of them need to be sent from the client to the program. + +### Key Parameters for Configuring a Concurrent Merkle Tree + +As a developer, you are responsible for controlling three key parameters that +directly affect the tree’s size, cost, and the number of concurrent changes it +can handle: + +1. **Max Depth** +2. **Max Buffer Size** +3. **Canopy Depth** + +Let’s take a brief overview of each parameter. + +#### Max Depth + +The **max depth** determines how many levels or "hops" are required to reach the +root of the tree from any leaf. Since Merkle trees are structured as binary +trees, where each leaf is paired with only one other leaf, the max depth can be +used to calculate the total number of nodes in the tree with the formula: +`2^maxDepth`. + +Here’s a quick TypeScript function for illustration: + +```typescript +const getMaxDepth = (itemCount: number) => { + if (itemCount === 0) { + return 0; + } + return Math.ceil(Math.log2(itemCount)); +}; +``` + +A max depth of 20 would allow for over one million leaves, making it suitable +for storing large datasets like NFTs. + +#### Max Buffer Size + +The **max buffer size** controls how many concurrent updates can be made to the +tree within a single slot while keeping the root hash valid. In a standard +Merkle tree, only the first transaction in a slot would be successful since it +updates the root hash, causing all subsequent transactions to fail due to hash +mismatches. However, in a concurrent Merkle tree, the buffer maintains a log of +changes, allowing multiple transactions to update the tree simultaneously by +checking the appropriate root hash from the buffer. 
A larger buffer size +increases throughput by enabling more concurrent changes. + +#### Canopy Depth + +The **canopy depth** specifies how many proof nodes are stored onchain for any +given proof path. To verify any leaf in the tree, you need a complete proof +path, which includes one proof node for every layer of the tree. For a tree with +a max depth of 14, there will be 14 proof nodes in total. Each proof node adds +32 bytes to the transaction, and without careful management, large trees could +exceed the transaction size limit. + +Storing more proof nodes onchain (i.e., having a deeper canopy) allows other +programs to interact with your tree without exceeding transaction limits, but it +also uses more onchain storage. Consider the complexity of interactions with +your tree when deciding on an appropriate canopy depth. + +### Balancing Trade-offs + +These three values—max depth, max buffer size, and canopy depth—all come with +trade-offs. Increasing any of them will enlarge the account used to store the +tree, raising the cost of creating the tree. + +- **Max Depth:** This is straightforward to determine based on how much data + needs to be stored. For example, if you need to store 1 million compressed + NFTs (cNFTs), where each cNFT is a leaf, you would need a max depth of 20 + (`2^maxDepth > 1 million`). +- **Max Buffer Size:** The choice of buffer size is mainly a question of + throughput—how many concurrent updates are required? A larger buffer allows + for more updates in the same slot. +- **Canopy Depth:** A deeper canopy improves composability, enabling other + programs to interact with your state-compressed program without exceeding + transaction size limits. Omitting the canopy is discouraged, as it could cause + issues with transaction size, especially when other programs are involved. + +### Data Access in a State-Compressed Program + +In a state-compressed program, the actual data isn’t stored directly onchain. 
+Instead, the concurrent Merkle tree structure is stored, while the raw data +resides in the blockchain’s more affordable ledger state. This makes accessing +the data more challenging, but not impossible. + +The Solana ledger is essentially a list of entries containing signed +transactions, which can be traced back to the Genesis block theoretically. This +means any data that has ever been included in a transaction is stored in the +ledger. + +Since the state compression process happens onchain, all the data is still in +the ledger state. In theory, you could retrieve the original data by replaying +the entire chain state from the start. However, it’s far more practical (though +still somewhat complex) to use an indexer to track and index the data as the +transactions happen. This creates an offchain "cache" of the data that can be +easily accessed and verified against the onchain root hash. + +While this process may seem complex at first, it becomes clearer with practice. + +### State Compression Tooling + +While understanding the theory behind state compression is crucial, you don’t +have to build it all from scratch. Talented engineers have already developed +essential tools like the SPL State Compression Program and the Noop Program to +simplify the process. #### SPL State Compression and Noop Programs -The SPL State Compression Program exists to make the process of creating and -updating concurrent Merkle trees repeatable and composable throughout the Solana -ecosystem. It provides instructions for initializing Merkle trees, managing tree -leafs (i.e. add, update, remove data), and verifying leaf data. - -The State Compression Program also leverages a separate “no op” program whose -primary purpose is to make leaf data easier to index by logging it to the ledger -state. When you want to store compressed data, you pass it to the State -Compression program where it gets hashed and emitted as an “event” to the Noop -program. 
The hash gets stored in the corresponding concurrent Merkle tree, but
-the raw data remains accessible through the Noop program's transaction logs.
-
-#### Index data for easy lookup
-
-Under normal conditions, you would typically access onchain data by fetching the
-appropriate account. When using state compression, however, it's not so
-straightforward.
-
-As mentioned above, the data now exists in the ledger state rather than in an
-account. The easiest place to find the full data is in the logs of the Noop
-instruction. Unfortunately, while this data will in a sense exist in the ledger
-state forever, it will likely be inaccessible through validators after a certain
-period of time.
-
-To save space and be more performant, validators don't retain every transaction
-back to the genesis block. The specific amount of time you'll be able to access
-the Noop instruction logs related to your data will vary based on the validator.
-Eventually, you'll lose access to it if you're relying directly on instruction
-logs.
-
-Technically, you *can* replay the transaction state back to the genesis block
-but the average team isn't going to do that, and it certainly won't be
-performant. The
+The SPL State Compression Program is designed to streamline and standardize the
+creation and management of concurrent Merkle trees across the Solana ecosystem.
+It provides Instruction Handlers for initializing Merkle trees, handling tree
+leaves (such as adding, updating, or removing data), and verifying the integrity
+of leaf data.
+
+Additionally, the State Compression Program works in conjunction with a separate
+"Noop" program. A [no-op program](https://en.wikipedia.org/wiki/NOP_(code))
+does nothing - literally 'no operation.' The Solana Noop Program only logs data
+to the ledger state; however, that logging is essential to state compression:
+
+When you store compressed data, it’s passed to the State Compression Program,
+which hashes the data and emits it as an "event" to the Noop Program.
While the +hash is stored in the concurrent Merkle tree, the raw data can still be accessed +via the Noop Program’s transaction logs. + +### Indexing Data for Easy Lookup + +Typically, accessing onchain data is as simple as fetching the relevant account. +However, with state compression, it’s not that straightforward. + +As mentioned earlier, the data now resides in the ledger state rather than in an +account. The most accessible place to find the complete data is in the logs of +the Noop instruction. While this data remains in the ledger state indefinitely, +it may become inaccessible through validators after a certain period. + +Validators don’t store all transactions back to the Genesis block to save space +and improve performance. The length of time you can access Noop instruction logs +varies depending on the validator. Eventually, the logs will become unavailable +if you’re relying on direct access to them. + +In theory, it’s possible to replay transaction states back to the genesis block, +but this approach is impractical for most teams and isn’t efficient. Some RPC +providers have adopted the [Digital Asset Standard (DAS)](https://docs.helius.dev/compression-and-das-api/digital-asset-standard-das-api) -has been adopted by many RPC providers to enable efficient queries of compressed -NFTs and other assets. However, at the time of writing, it doesn't support -arbitrary state compression. Instead, you have two primary options: +to enable efficient querying of compressed NFTs and other assets. However, as of +now, DAS does not support arbitrary state compression. + +You essentially have two main options: -1. Use an indexing provider that will build a custom indexing solution for your - program that observes the events sent to the Noop program and stores the +1. Use an indexing provider to create a custom indexing solution for your + program, which will monitor the events sent to the Noop program and store the relevant data offchain. -2. 
Create your own pseudo-indexing solution that stores transaction data - offchain. +2. Build your indexing solution that stores transaction data offchain. -For many dApps, option 2 makes plenty of sense. Larger-scale applications may -need to rely on infrastructure providers to handle their indexing. +For many dApps, option 2 can be a practical choice. Larger-scale applications, +however, may need to rely on infrastructure providers to manage their indexing +needs. -### State compression development process +### State Compression Development Process -#### Create Rust types +#### Create Rust Types -As with a typical Anchor program, one of the first things you should do is -define your program's Rust types. However, Rust types in a traditional Anchor -program often represent accounts. In a state-compressed program, your account -state will only store the Merkle tree. The more “usable” data schema will just -be serialized and logged to the Noop program. +In a typical Anchor program, developers often start by defining the Rust types +that represent accounts. For a state-compressed program, however, the focus +shifts to defining types that align with the Merkle tree structure. -This type should include all the data stored in the leaf node and any contextual -information needed to make sense of the data. For example, if you were to create -a simple messaging program, your `Message` struct might look as follows: +In state compression, your onchain account will primarily store the Merkle tree. +The more practical data will be serialized and logged to the Noop program for +easier access and management. Your Rust types should encompass all data stored +in the leaf nodes and any contextual information necessary for interpreting that +data. 
For instance, if you’re developing a simple messaging program, your
+`Message` struct might look something like this:

```rust
-#[derive(AnchorSerialize)]
+const DISCRIMINATOR_SIZE: usize = 8;
+const PUBKEY_SIZE: usize = 32;
+
+/// A log entry for messages sent between two public keys.
+#[derive(AnchorSerialize, AnchorDeserialize)]
 pub struct MessageLog {
-    leaf_node: [u8; 32], // The leaf node hash
-    from: Pubkey,        // Pubkey of the message sender
-    to: Pubkey,          // Pubkey of the message recipient
-    message: String,     // The message to send
+    /// The leaf node hash for message logging.
+    pub leaf_node: [u8; 32],
+    /// The public key of the message sender.
+    pub from: Pubkey,
+    /// The public key of the message recipient.
+    pub to: Pubkey,
+    /// The actual message content.
+    pub message: String,
 }

-impl MessageLog {
-    // Constructs a new message log from given leaf node and message
-    pub fn new(leaf_node: [u8; 32], from: Pubkey, to: Pubkey, message: String) -> Self {
-        Self { leaf_node, from, to, message }
-    }
+/// Constructs a new `MessageLog`.
+///
+/// # Arguments
+///
+/// * `leaf_node` - A 32-byte array representing the leaf node hash.
+/// * `from` - The public key of the message sender.
+/// * `to` - The public key of the message recipient.
+/// * `message` - The message to be sent.
+///
+/// # Returns
+///
+/// Returns a new `MessageLog` instance.
+pub fn new_message_log(leaf_node: [u8; 32], from: Pubkey, to: Pubkey, message: String) -> MessageLog {
+    MessageLog { leaf_node, from, to, message }
 }
```

-To be abundantly clear, **this is not an account that you will be able to read
-from**. Your program will be creating an instance of this type from instruction
-inputs, not constructing an instance of this type from account data that it
-reads. We'll discuss how to read data in a later section.
+To be absolutely clear, the **`MessageLog` is not an account you will read
+from**.
Instead, your program will create an instance of `MessageLog` using
+inputs from the Instruction Handler, rather than constructing it from data read
+from an account. We will cover how to read data from compressed accounts later.

-#### Initialize a new tree
+#### Initialize a New Tree

-Clients will create and initialize the Merkle tree account in two separate
-instructions. The first is simply allocating the account by calling System
-Program. The second will be an instruction that you create on a custom program
-that initializes the new account. This initialization is effectively just
-recording what the max depth and buffer size for the Merkle tree should be.
+To set up a new Merkle tree, clients need to perform two distinct steps.

-All this instruction needs to do is build a CPI to invoke the
-`init_empty_merkle_tree` instruction on the State Compression Program. Since
-this requires the max depth and max buffer size, these will need to be passed in
-as arguments to the instruction.
+1. First, they allocate the account by calling the System Program.
+2. Next, they use a custom program to initialize the new account. This
+   initialization involves setting the maximum depth and buffer size for the
+   Merkle tree.

-Remember, the max depth refers to the maximum number of hops to get from any
-leaf to the root of the tree. Max buffer size refers to the amount of space
-reserved for storing a changelog of tree updates. This changelog is used to
-ensure that your tree can support concurrent updates within the same block.
+The initialization Instruction Handler must create a CPI (Cross-Program
+Invocation) to call the `init_empty_merkle_tree` instruction from the State
+Compression Program. You’ll need to provide the maximum depth and buffer size as
+arguments to this Instruction Handler.
-For example, if we were initializing a tree for storing messages between users, -the instruction might look like this: +- **Max depth**: Defines the maximum number of hops needed to travel from any + leaf to the root of the tree. +- **Max buffer size**: Specifies the space allocated for storing a changelog of + tree updates. This changelog is essential for supporting concurrent updates + within the same block. + +For instance, if you are initializing a tree to store messages between users, +your Instruction Handler might look like this: ```rust +/// Initializes an empty Merkle tree for storing messages with a specified depth and buffer size. +/// +/// This function creates a CPI (Cross-Program Invocation) call to initialize the Merkle tree account +/// using the provided authority and compression program. The PDA (Program Derived Address) seeds are used for +/// signing the transaction. +/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for Merkle tree initialization. +/// * `max_depth` - The maximum depth of the Merkle tree. +/// * `max_buffer_size` - The maximum buffer size of the Merkle tree. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. +/// +/// # Errors +/// +/// This function will return an error if the CPI call to `init_empty_merkle_tree` fails. 
pub fn create_messages_tree( ctx: Context, max_depth: u32, // Max depth of the Merkle tree @@ -311,130 +376,185 @@ pub fn create_messages_tree( ) -> Result<()> { // Get the address for the Merkle tree account let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[ + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ &[ - merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + merkle_tree.as_ref(), // The address of the Merkle tree account + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA ], ]; - // Create cpi context for init_empty_merkle_tree instruction. + // Create CPI context for `init_empty_merkle_tree` instruction handler let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program Initialize { authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be initialized noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data }, - signer_seeds // The seeds for pda signing + signers_seeds // The seeds for PDAs signing ); - // CPI to initialize an empty Merkle tree with given max depth and buffer size + // CPI to initialize an empty Merkle tree with the given max depth and buffer size init_empty_merkle_tree(cpi_ctx, max_depth, max_buffer_size)?; Ok(()) } ``` -#### Add hashes to the tree - -With an initialized Merkle tree, it's possible to start adding data hashes. 
This -involves passing the uncompressed data to an instruction on your program that -will hash the data, log it to the Noop program, and use the State Compression -Program's `append` instruction to add the hash to the tree. The following -discuss what your instruction needs to do in depth: - -1. Use the `hashv` function from the `keccak` crate to hash the data. In most - cases, you'll want to also hash the owner or authority of the data as well to - ensure that it can only be modified by the proper authority. -2. Create a log object representing the data you wish to log to the Noop - Program, then call `wrap_application_data_v1` to issue a CPI to the Noop - program with this object. This ensures that the uncompressed data is readily - available to any client looking for it. For broad use cases like cNFTs, that - would be indexers. You might also create your own observing client to - simulate what indexers are doing but specific to your application. -3. Build and issue a CPI to the State Compression Program's `append` - instruction. This takes the hash computed in step 1 and adds it to the next - available leaf on your Merkle tree. Just as before, this requires the Merkle - tree address and the tree authority bump as signature seeds. - -When all this is put together using the messaging example, it looks something -like this: +#### Adding Hashes to the Tree + +Once the Merkle tree is initialized, you can begin adding data hashes to it. +This process involves passing the uncompressed data to an Instruction handler +within your program, which will hash the data, log it to the Noop Program, and +then use the State Compression Program’s `append` instruction to add the hash to +the tree. Here’s how the Instruction Handler operates in detail: + +1. **Hash the Data**: Use the `hashv` function from the `keccak` crate to hash + the data. It’s recommended to include the data owner or authority in the hash + to ensure that only the proper authority can modify it. +2. 
**Log the Data**: Create a log object representing the data you want to log + to the Noop Program. Then, call `wrap_application_data_v1` to issue a CPI + (Cross-Program Invocation) to the Noop Program with this object. This makes + the uncompressed data easily accessible to any client, such as indexers, that + may need it. You could also develop a custom client to observe and index data + for your application specifically. + +3. **Append the Hash**: Construct and issue a CPI to the State Compression + Program’s `append` Instruction. This will take the hash generated in step 1 + and append it to the next available leaf on the Merkle tree. As with previous + steps, this requires the Merkle tree address and tree authority bump as + signature seeds. + +When applied to a messaging system, the resulting implementation might look like +this: ```rust -// Instruction for appending a message to a tree. +/// Appends a message to the Merkle tree. +/// +/// This function hashes the message and the sender’s public key to create a leaf node, +/// logs the message using the noop program, and appends the leaf node to the Merkle tree. +/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for appending the message. +/// * `message` - The message to append to the Merkle tree. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. +/// +/// # Errors +/// +/// This function will return an error if any of the CPI calls (logging or appending) fail. 
pub fn append_message(ctx: Context, message: String) -> Result<()> { - // Hash the message + whatever key should have update authority + // Hash the message + sender’s public key to create a leaf node let leaf_node = keccak::hashv(&[message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); - // Create a new "message log" using the leaf node hash, sender, receipient, and message - let message_log = MessageLog::new(leaf_node.clone(), ctx.accounts.sender.key().clone(), ctx.accounts.receipient.key().clone(), message); - // Log the "message log" data using noop program + + // Create a new "MessageLog" using the leaf node hash, sender, recipient, and message + let message_log = new_message_log( + leaf_node.clone(), + ctx.accounts.sender.key().clone(), + ctx.accounts.recipient.key().clone(), + message, + ); + + // Log the "MessageLog" data using the noop program wrap_application_data_v1(message_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; - // Get the address for the Merkle tree account + + // Get the Merkle tree account address let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[ + + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ &[ merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA ], ]; - // Create a new cpi context and append the leaf node to the Merkle tree. 
+ + // Create a CPI context and append the leaf node to the Merkle tree let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program Modify { - authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA + authority: ctx.accounts.tree_authority.to_account_info(), // Authority for the Merkle tree, using a PDA merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data }, - signer_seeds // The seeds for pda signing + signers_seeds, // The seeds for PDAs signing ); - // CPI to append the leaf node to the Merkle tree + + // CPI call to append the leaf node to the Merkle tree append(cpi_ctx, leaf_node)?; + Ok(()) } ``` -#### Update hashes +#### Updating Hashes -To update data, you need to create a new hash to replace the hash at the -relevant leaf on the Merkle tree. To do this, your program needs access to four -things: +To update a leaf in a Merkle tree, you’ll need to generate a new hash to replace +the existing one. This process requires four key inputs: -1. The index of the leaf to update +1. The index of the leaf you wish to update 2. The root hash of the Merkle tree -3. The original data you wish to modify +3. The original data you want to modify 4. The updated data -Given access to this data, a program instruction can follow very similar steps -as those used to append the initial data to the tree: - -1. **Verify update authority** - The first step is new. In most cases, you want - to verify update authority. This typically involves proving that the signer - of the `update` transaction is the true owner or authority of the leaf at the - given index. 
Since the data is compressed as a hash on the leaf, we can't - simply compare the `authority` public key to a stored value. Instead, we need - to compute the previous hash using the old data and the `authority` listed in - the account validation struct. We then build and issue a CPI to the State - Compression Program's `verify_leaf` instruction using our computed hash. -2. **Hash the new data** - This step is the same as the first step from - appending initial data. Use the `hashv` function from the `keccak` crate to - hash the new data and the update authority, each as their corresponding byte +Using these inputs, you can follow a series of steps similar to those used when +initially appending data to the tree: + +1. **Verify Update Authority**: The first step, unique to updates, is to verify + the authority of the entity making the update. This generally involves + checking that the signer of the `update` transaction is indeed the owner or + authority of the leaf at the specified index. Since the data in the leaf is + hashed, you can’t directly compare the authority’s public key to a stored + value. Instead, compute the previous hash using the old data and the + `authority` listed in the account validation struct. Then, invoke a CPI to + the State Compression Program’s `verify_leaf` instruction to confirm the hash + matches. + +2. **Hash the New Data**: This step mirrors the hashing process for appending + data. Use the `hashv` function from the `keccak` crate to hash the new data + and the update authority, converting each to its corresponding byte representation. -3. **Log the new data** - This step is the same as the second step from - appending initial data. Create an instance of the log struct and call - `wrap_application_data_v1` to issue a CPI to the Noop program. -4. **Replace the existing leaf hash** - This step is slightly different than the - last step of appending initial data. 
Build and issue a CPI to the State - Compression Program's `replace_leaf` instruction. This uses the old hash, the - new hash, and the leaf index to replace the data of the leaf at the given - index with the new hash. Just as before, this requires the Merkle tree - address and the tree authority bump as signature seeds. - -Combined into a single instruction, this process looks as follows: + +3. **Log the New Data**: As with the initial append operation, create a log + object to represent the new data, and use `wrap_application_data_v1` to + invoke the Noop Program via CPI. This ensures that the new uncompressed data + is logged and accessible offchain. + +4. **Replace the Existing Leaf Hash**: This step is slightly different from + appending new data. Here, you’ll need to invoke a CPI to the State + Compression Program’s `replace_leaf` instruction. This operation will replace + the existing hash at the specified leaf index with the new hash. You’ll need + to provide the old hash, the new hash, and the leaf index. As usual, the + Merkle tree address and tree authority bump are required as signature seeds. + +When combined, the instructions for updating a hash might look like this: ```rust +/// Updates a message in the Merkle tree. +/// +/// This function verifies the old message in the Merkle tree by checking its leaf node, +/// and then replaces it with a new message by modifying the Merkle tree’s leaf node. +/// +/// # Arguments +/// +/// * `ctx` - The context containing the accounts required for updating the message. +/// * `index` - The index of the leaf node to update. +/// * `root` - The root hash of the Merkle tree. +/// * `old_message` - The old message that is currently in the Merkle tree. +/// * `new_message` - The new message to replace the old message. +/// +/// # Returns +/// +/// This function returns a `Result<()>`, indicating success or failure. 
+/// +/// # Errors +/// +/// This function will return an error if verification or replacement of the Merkle tree leaf fails. pub fn update_message( ctx: Context, index: u32, @@ -442,59 +562,67 @@ pub fn update_message( old_message: String, new_message: String ) -> Result<()> { - let old_leaf = keccak - ::hashv(&[old_message.as_bytes(), ctx.accounts.sender.key().as_ref()]) - .to_bytes(); + // Hash the old message + sender’s public key to create the old leaf node + let old_leaf = keccak::hashv(&[old_message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); + // Get the Merkle tree account address let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[ + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[ &[ merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA ], ]; - // Verify Leaf + // Verify the old leaf node in the Merkle tree { + // If the old and new messages are the same, no update is needed if old_message == new_message { msg!("Messages are the same!"); return Ok(()); } + // Create CPI context for verifying the leaf node let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program VerifyLeaf { - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be verified }, - signer_seeds // The seeds for pda signing + signers_seeds, // The seeds for PDAs signing ); - // Verify or Fails + + // Verify the old leaf node in the Merkle tree verify_leaf(cpi_ctx, root, old_leaf, index)?; } - let new_leaf = keccak 
- ::hashv(&[new_message.as_bytes(), ctx.accounts.sender.key().as_ref()]) - .to_bytes(); + // Hash the new message + sender’s public key to create the new leaf node + let new_leaf = keccak::hashv(&[new_message.as_bytes(), ctx.accounts.sender.key().as_ref()]).to_bytes(); - // Log out for indexers - let message_log = MessageLog::new(new_leaf.clone(), ctx.accounts.sender.key().clone(), ctx.accounts.recipient.key().clone(), new_message); - // Log the "message log" data using noop program + // Log the new message for indexers using the noop program + let message_log = new_message_log( + new_leaf.clone(), + ctx.accounts.sender.key().clone(), + ctx.accounts.recipient.key().clone(), + new_message, + ); wrap_application_data_v1(message_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; - // replace leaf + // Replace the old leaf with the new leaf in the Merkle tree { + // Create CPI context for replacing the leaf node let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program Modify { authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data }, - signer_seeds // The seeds for pda signing + signers_seeds, // The seeds for PDAs signing ); - // CPI to append the leaf node to the Merkle tree + + // Replace the old leaf node with the new one in the Merkle tree replace_leaf(cpi_ctx, root, old_leaf, new_leaf, index)?; } @@ -502,55 +630,63 @@ pub fn update_message( } ``` -#### Delete hashes +#### Deleting Hashes -At the time of writing, the State Compression Program doesn't provide an -explicit `delete` instruction. 
Instead, you'll want to update leaf data with -data that indicates the data as “deleted.” The specific data will depend on your -use case and security concerns. Some may opt to set all data to 0, whereas -others might store a static string that all “deleted” items will have in common. +As of now, the State Compression Program does not have a dedicated `delete` +instruction. -#### Access data from a client +Instead, you can simulate deletion by updating the leaf data with a value that +signals it has been "deleted." -The discussion so far has covered 3 of the 4 standard CRUD procedures: Create, -Update, and Delete. What's left is one of the more difficult concepts in state -compression: reading data. +The exact value you choose will depend on your specific use case and security +requirements. For some, this may involve setting all data fields to zero, while +others might prefer storing a predefined static string that marks the leaf as +deleted. This approach allows you to handle deletions in a way that suits your +application’s needs without compromising data integrity. -Accessing data from a client is tricky primarily because the data isn't stored -in a format that is easy to access. The data hashes stored in the Merkle tree -account can't be used to reconstruct the initial data, and the data logged to -the Noop program isn't available indefinitely. +#### Accessing Data from a Client -Your best bet is one of two options: +We’ve covered creating, updating, and deleting data in state compression, but +reading data presents its unique challenges. -1. Work with an indexing provider to create a custom indexing solution for your - program, then write client-side code based on how the indexer gives you - access to the data. -2. Create your own pseudo-indexer as a lighter-weight solution. +Accessing compressed data from a client can be tricky because the Merkle tree +stores only data hashes, which cannot be used to recover the original data. 
+Additionally, the uncompressed data logged to the Noop program is not retained +indefinitely. -If your project is truly decentralized such that many participants will interact -with your program through means other than your own frontend, then option 2 -might not be sufficient. However, depending on the scale of the project or -whether or not you'll have control over most program access, it can be a viable -approach. +To access this data, you generally have two options: -There is no “right” way to do this. Two potential approaches are: +1. **Work with an indexing provider** to develop a custom solution tailored to + your program. This allows you to write client-side code to retrieve and + access the data based on how the indexer provides it. +2. **Create your own pseudo-indexer** to store and retrieve the data, offering a + lighter-weight solution. -1. Store the raw data in a database at the same time as sending it to the - program, along with the leaf that the data is hashed and stored to. -2. Create a server that observes your program's transactions, looks up the - associated Noop logs, decodes the logs, and stores them. +If your project is decentralized and expects widespread interaction beyond your +frontend, option 2 might not be sufficient. However, if you have control over +most program interactions, this approach can work. -We'll do a little bit of both when writing tests in this lesson's lab (though we -won't persist data in a db - it will only live in memory for the duration of the -tests). +There’s no one-size-fits-all solution here. Two potential strategies include: -The setup for this is somewhat tedious. Given a particular transaction, you can -fetch the transaction from the RPC provider, get the inner instructions -associated with the Noop program, use the `deserializeApplicationDataEvent` -function from the `@solana/spl-account-compression` JS package to get the logs, -then deserialize them using Borsh. 
Below is an example based on the messaging -program used above. +1. **Store raw data**: One approach is to store the raw data in a database + simultaneously by sending it to the program. This allows you to keep a record + of the data, along with the Merkle tree leaf where the data was hashed and + stored. + +2. **Create a transaction observer**: Another approach is to create a server + that observes the transactions your program executes. This server would fetch + transactions, look up the related Noop logs, decode them, and store the data. + +When writing tests in the lab, we’ll simulate both of these approaches, although +instead of using a database, the data will be stored in memory for the test’s +duration. + +The process of setting this up can be a bit complex. For a given transaction, +you’ll retrieve it from the RPC provider, extract the inner instructions related +to the Noop program, and use the `deserializeApplicationDataEvent` function from +the `@solana/spl-account-compression` JS package to decode the logs. Then, +you’ll use Borsh to deserialize the data. Here’s an example from the messaging +program to illustrate the process: ```typescript export async function getMessageLog( @@ -612,18 +748,24 @@ export async function getMessageLog( ### Conclusion -Generalized state compression can be difficult but is absolutely possible to -implement with the available tools. Additionally, the tools and programs will -only get better over time. If you come up with solutions that improve your -development experience, please share with the community! +Implementing generalized state compression may be challenging, but it is +entirely achievable using the available tools. As the ecosystem evolves, these +tools and programs will continue to improve, making the process more +streamlined. If you discover solutions that enhance your development experience, +please don’t hesitate to share them with the community! 
+ + +Remember to write comprehensive tests for your state compression implementation. This ensures your program behaves correctly and helps catch potential issues early in the development process. + -## Lab +## Lab: Building a Note-Taking App with Generalized State Compression -Let's practice generalized state compression by creating a new Anchor program. -This program will use custom state compression to power a simple note-taking -app. +In this lab, we’ll walk through the process of developing an Anchor program that +uses custom state compression to power a basic note-taking app. This will give +you hands-on experience in working with compressed data and help reinforce key +concepts around state compression on Solana. -#### 1. Project setup +#### 1. Set up the Project Start by initializing an Anchor program: @@ -631,8 +773,9 @@ Start by initializing an Anchor program: anchor init compressed-notes ``` -We'll be using the `spl-account-compression` crate with the `cpi` feature -enabled. Let's add it as a dependency in `programs/compressed-notes/Cargo.toml`. +Next, we’ll add the `spl-account-compression` crate with the `cpi` feature +enabled. To do this, update the `Cargo.toml` file located at +`programs/compressed-notes` by adding the following dependency: ```toml [dependencies] @@ -641,9 +784,12 @@ spl-account-compression = { version="0.2.0", features = ["cpi"] } solana-program = "1.16.0" ``` -We'll be testing locally but we need both the Compression program and the Noop -program from Mainnet. We'll need to add these to the `Anchor.toml` in the root -directory so they get cloned to our local cluster. +We’ll be running tests locally, but we’ll need both the State Compression +Program and the Noop Program from the Mainnet to do so. To make sure these +programs are available on our local cluster, we need to include them in the +`Anchor.toml` file located in the root directory. 
Here’s how you can add them: + +In `Anchor.toml`, update the programs section with the following entries: ```toml [test.validator] @@ -656,15 +802,15 @@ address = "noopb9bkMVfRPU8AsbpTUg8AQkHtKwMYZiFUjNRtMmV" address = "cmtDvXumGCrqC1Age74AVPhSRVXJMd8PJS91L8KbNCK" ``` -Lastly, let's prepare the `lib.rs` file for the rest of the Demo. Remove the -`initialize` instruction and the `Initialize` accounts struct, then add the -imports shown in the code snippet below (be sure to put in **_your_** program -id): +Finally, let’s set up the `lib.rs` file for the remainder of the demo. Start by +removing the `initialize` instruction and the `Initialize` accounts struct. +Next, add the necessary imports as indicated in the code snippet, making sure to +include **_your_** program ID. ```rust use anchor_lang::{ prelude::*, - solana_program::keccak + solana_program::keccak, }; use spl_account_compression::{ Noop, @@ -676,125 +822,216 @@ use spl_account_compression::{ wrap_application_data_v1, }; -declare_id!("YOUR_KEY_GOES_HERE"); - -// STRUCTS GO HERE +// Replace with your program ID +declare_id!("PROGRAM_PUBLIC_KEY_GOES_HERE"); +/// A program that manages compressed notes using a Merkle tree for efficient storage and verification. #[program] pub mod compressed_notes { use super::*; - // FUNCTIONS GO HERE + // Define your program instructions here. + + /// Initializes a new Merkle tree for storing messages. + /// + /// This function creates a Merkle tree with the specified maximum depth and buffer size. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for initializing the tree. + /// * `max_depth` - The maximum depth of the Merkle tree. + /// * `max_buffer_size` - The maximum buffer size of the Merkle tree. + pub fn create_messages_tree( + ctx: Context, + max_depth: u32, + max_buffer_size: u32, + ) -> Result<()> { + // Tree creation logic here + Ok(()) + } + + /// Appends a new message to the Merkle tree. 
+ /// + /// This function hashes the message and adds it as a leaf node to the tree. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for appending the message. + /// * `message` - The message to append to the Merkle tree. + pub fn append_message(ctx: Context, message: String) -> Result<()> { + // Message appending logic here + Ok(()) + } + + /// Updates an existing message in the Merkle tree. + /// + /// This function verifies the old message and replaces it with the new message in the tree. + /// + /// # Arguments + /// + /// * `ctx` - The context containing the accounts required for updating the message. + /// * `index` - The index of the message in the tree. + /// * `root` - The root of the Merkle tree. + /// * `old_message` - The old message to be replaced. + /// * `new_message` - The new message to replace the old message. + pub fn update_message( + ctx: Context, + index: u32, + root: [u8; 32], + old_message: String, + new_message: String, + ) -> Result<()> { + // Message updating logic here + Ok(()) + } + + // Add more functions as needed +} + +// Add structs for accounts, state, etc., here +/// Struct for holding the account information required for message operations. +#[derive(Accounts)] +pub struct MessageAccounts<'info> { + /// The Merkle tree account. + #[account(mut)] + pub merkle_tree: AccountInfo<'info>, + /// The authority for the Merkle tree. + pub tree_authority: AccountInfo<'info>, + /// The sender’s account. + pub sender: Signer<'info>, + /// The recipient’s account. + pub recipient: AccountInfo<'info>, + /// The compression program (Noop program). + pub compression_program: Program<'info, SplAccountCompression>, + /// The log wrapper account for logging data. + pub log_wrapper: AccountInfo<'info>, } ``` -For the rest of this Demo, we'll be making updates to the program code directly -in the `lib.rs` file. This simplifies the explanations a bit. You're welcome to -modify the structure as you will. 
+For the remainder of this demo, we’ll be making updates directly in the `lib.rs` +file. This approach simplifies the explanations. You can modify the structure as +needed. -Feel free to build before continuing. This ensures your environment is working -properly and shortens future build times. +It’s a good idea to build your project now to confirm that your environment is +set up correctly and to reduce build times in the future. #### 2. Define `Note` schema -Next, we're going to define what a note looks like within our program. Notes -should have the following properties: +Next, we’ll define the structure of a note within our program. Each note should +have the following attributes: -- `leaf_node` - this should be a 32-byte array representing the hash stored on - the leaf node -- `owner` - the public key of the note owner -- `note` - the string representation of the note +- `leaf_node` - a 32-byte array representing the hash stored on the leaf node. +- `owner` - the public key of the note’s owner. +- `note` - a string containing the text of the note. ```rust -#[derive(AnchorSerialize)] +#[derive(AnchorSerialize, AnchorDeserialize, Clone)] +/// A struct representing a log entry in the Merkle tree for a note. pub struct NoteLog { - leaf_node: [u8; 32], // The leaf node hash - owner: Pubkey, // Pubkey of the note owner - note: String, // The note message + /// The leaf node hash generated from the note data. + pub leaf_node: [u8; 32], + /// The public key of the note’s owner. + pub owner: Pubkey, + /// The content of the note. + pub note: String, } -impl NoteLog { - // Constructs a new note from given leaf node and message - pub fn new(leaf_node: [u8; 32], owner: Pubkey, note: String) -> Self { - Self { leaf_node, owner, note } - } +/// Constructs a new note log from a given leaf node, owner, and note message. +/// +/// # Arguments +/// +/// * `leaf_node` - A 32-byte array representing the hash of the note. +/// * `owner` - The public key of the note’s owner. 
+/// * `note` - The note message content. +/// +/// # Returns +/// +/// A new `NoteLog` struct containing the provided data. +pub fn create_note_log(leaf_node: [u8; 32], owner: Pubkey, note: String) -> NoteLog { + NoteLog { leaf_node, owner, note } } ``` -In a traditional Anchor program, this would be an account struct, but since -we're using state compression, our accounts won't be mirroring our native -structures. Since we don't need all the functionality of an account, we can just -use the `AnchorSerialize` derive macro rather than the `account` macro. +In a traditional Anchor program, a note would typically be represented by a +`Note` struct using the `account` macro. However, because we’re using state +compression we use `NoteLog`, a struct with the `AnchorSerialize` macro applied. -#### 3. Define input accounts and constraints +#### 3. Define Account Constraints -As luck would have it, every one of our instructions will be using the same -accounts. We'll create a single `NoteAccounts` struct for our account -validation. It'll need the following accounts: +All our instruction handlers will use the same +[account constraints](https://www.anchor-lang.com/docs/account-constraints): -- `owner` - this is the creator and owner of the note; should be a signer on the - transaction -- `tree_authority` - the authority for the Merkle tree; used for signing - compression-related CPIs -- `merkle_tree` - the address of the Merkle tree used to store the note hashes; - will be unchecked since it is validated by the State Compression Program -- `log_wrapper` - the address of the Noop Program -- `compression_program` - the address of the State Compression Program +- `owner` - The creator and owner of the note, who must sign the transaction. +- `tree_authority` - The authority for the Merkle tree, used for signing + compression-related CPIs. 
+- `merkle_tree` - The address of the Merkle tree where note hashes are stored; + this will be unchecked as it’s validated by the State Compression Program. +- `log_wrapper` - The address of the Noop Program. +- `compression_program` - The address of the State Compression Program. ```rust #[derive(Accounts)] +/// Accounts required for interacting with the Merkle tree for note management. pub struct NoteAccounts<'info> { - // The payer for the transaction + /// The payer for the transaction, who also owns the note. #[account(mut)] pub owner: Signer<'info>, - // The pda authority for the Merkle tree, only used for signing + /// The PDA (Program Derived Address) authority for the Merkle tree. + /// This account is only used for signing and is derived from the Merkle tree address. #[account( seeds = [merkle_tree.key().as_ref()], bump, )] pub tree_authority: SystemAccount<'info>, - // The Merkle tree account - /// CHECK: This account is validated by the spl account compression program + /// The Merkle tree account, where the notes are stored. + /// This account is validated by the SPL Account Compression program. + /// + /// The `UncheckedAccount` type is used since the account’s validation is deferred to the CPI. #[account(mut)] pub merkle_tree: UncheckedAccount<'info>, - // The noop program to log data + /// The Noop program used for logging data. + /// This is part of the SPL Account Compression stack and logs the note operations. pub log_wrapper: Program<'info, Noop>, - // The spl account compression program + /// The SPL Account Compression program used for Merkle tree operations. pub compression_program: Program<'info, SplAccountCompression>, } ``` -#### 4. Create `create_note_tree` instruction - -Next, let's create our `create_note_tree` instruction. Remember, clients will -have already allocated the Merkle tree account but will use this instruction to -initialize it. +#### 4. 
Create `create_note_tree` Instruction handler -All this instruction needs to do is build a CPI to invoke the -`init_empty_merkle_tree` instruction on the State Compression Program. To do -this, it needs the accounts listed in the `NoteAccounts` account validation -struct. It also needs two additional arguments: +Next, we’ll make the `create_note_tree` instruction handler, to initialize the +already allocated Merkle tree account. -1. `max_depth` - the max depth of the Merkle tree -2. `max_buffer_size` - the max buffer size of the Merkle tree +To implement this, you’ll need to build a CPI to invoke the +`init_empty_merkle_tree` instruction from the State Compression Program. The +`NoteAccounts` struct will provide the necessary accounts, but you’ll also need +to include two additional arguments: -These values are required for initializing the data on the Merkle tree account. -Remember, the max depth refers to the maximum number of hops to get from any -leaf to the root of the tree. Max buffer size refers to the amount of space -reserved for storing a changelog of tree updates. This changelog is used to -ensure that your tree can support concurrent updates within the same block. +1. **`max_depth`** - Specifies the maximum depth of the Merkle tree, indicating + the longest path from any leaf to the root. +2. **`max_buffer_size`** - Defines the maximum buffer size for the Merkle tree, + which determines the space allocated for recording tree updates. This buffer + is crucial for supporting concurrent updates within the same block. ```rust #[program] pub mod compressed_notes { use super::*; - // Instruction for creating a new note tree. + /// Instruction to create a new note tree (Merkle tree) for storing compressed notes. + /// + /// # Arguments + /// * `ctx` - The context that includes the accounts required for this transaction. + /// * `max_depth` - The maximum depth of the Merkle tree. + /// * `max_buffer_size` - The maximum buffer size of the Merkle tree. 
+ /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. pub fn create_note_tree( ctx: Context, max_depth: u32, // Max depth of the Merkle tree @@ -803,59 +1040,61 @@ pub mod compressed_notes { // Get the address for the Merkle tree account let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[&[ - merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + // The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ + merkle_tree.as_ref(), // The Merkle tree account address as the seed + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the tree authority PDA ]]; - // Create cpi context for init_empty_merkle_tree instruction. + // Create a CPI (Cross-Program Invocation) context for initializing the empty Merkle tree. let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // The SPL Account Compression program Initialize { - authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be initialized - noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data + authority: ctx.accounts.tree_authority.to_account_info(), // PDA authority for the Merkle tree + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program for logging data }, - signer_seeds, // The seeds for pda signing + signers_seeds, // The seeds for PDAs signing ); - // CPI to initialize an empty Merkle tree with given max depth and buffer size + // CPI call to initialize an empty Merkle tree with the specified 
depth and buffer size. init_empty_merkle_tree(cpi_ctx, max_depth, max_buffer_size)?; + Ok(()) } - //... + // Additional functions for the program can go here... } ``` -Ensure that your signer seeds on the CPI include both the Merkle tree address -and the tree authority bump. - -#### 5. Create `append_note` instruction - -Now, let's create our `append_note` instruction. This instruction needs to take -the raw note as a String and compress it into a hash that we'll store on the -Merkle tree. We'll also log the note to the Noop program so the entirety of the -data exists within the chain's state. - -The steps here are as follows: - -1. Use the `hashv` function from the `keccak` crate to hash the note and owner, - each as their corresponding byte representation. It's **_crucial_** that you - hash the owner as well as the note. This is how we'll verify note ownership - before updates in the update instruction. -2. Create an instance of the `NoteLog` struct using the hash from step 1, the - owner's public key, and the raw note as a String. Then call - `wrap_application_data_v1` to issue a CPI to the Noop program, passing the - instance of `NoteLog`. This ensures the entirety of the note (not just the - hash) is readily available to any client looking for it. For broad use cases - like cNFTs, that would be indexers. You might create your observing client to - simulate what indexers are doing but for your own application. -3. Build and issue a CPI to the State Compression Program's `append` - instruction. This takes the hash computed in step 1 and adds it to the next - available leaf on your Merkle tree. Just as before, this requires the Merkle - tree address and the tree authority bump as signature seeds. +Make sure that when setting up your CPI, you include both the Merkle tree +address and the tree authority bump in the signer seeds. + +#### 5. Create `append_note` Instruction handler + +Let’s create the `append_note` instruction handler. 
This will compress a raw +note into a hash and store it on the Merkle tree, while also logging the note to +the Noop program to ensure all data remains available onchain. + +Here’s how to accomplish this: + +1. **Hash the Data**: Utilize the `hashv` function from the `keccak` crate to + compute a hash of the note and the owner’s public key. Both should be + converted to their byte representations. It’s essential to hash the owner + along with the note to facilitate ownership verification during updates. + +2. **Log the Data**: Create a `NoteLog` instance with the hash from step 1, the + owner’s public key, and the note as a `String`. Then, use + `wrap_application_data_v1` to issue a CPI to the Noop program with this + `NoteLog` instance. This ensures the complete note (not just the hash) is + available to clients, similar to how indexers manage cNFTs. You might also + develop an observing client to simulate indexer functionality specific to + your application. + +3. **Append to the Merkle Tree**: Build and issue a CPI to the State Compression + Program’s `append` instruction. This will add the hash from step 1 to the + next available leaf on your Merkle tree. Ensure that the Merkle tree address + and the tree authority bump are included as signature seeds. ```rust #[program] @@ -864,34 +1103,47 @@ pub mod compressed_notes { //... - // Instruction for appending a note to a tree. + /// Instruction to append a note to the Merkle tree. + /// + /// # Arguments + /// * `ctx` - The context containing accounts needed for this transaction. + /// * `note` - The note message to append as a leaf node in the Merkle tree. + /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. 
pub fn append_note(ctx: Context, note: String) -> Result<()> { - // Hash the "note message" which will be stored as leaf node in the Merkle tree - let leaf_node = - keccak::hashv(&[note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); - // Create a new "note log" using the leaf node hash and note. + // Step 1: Hash the note message to create a leaf node for the Merkle tree + let leaf_node = keccak::hashv(&[note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + + // Step 2: Create a new NoteLog instance containing the leaf node, owner, and note let note_log = NoteLog::new(leaf_node.clone(), ctx.accounts.owner.key().clone(), note); - // Log the "note log" data using noop program + + // Step 3: Log the NoteLog data using the Noop program wrap_application_data_v1(note_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; - // Get the address for the Merkle tree account + + // Step 4: Get the Merkle tree account key (address) let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[&[ + + // Step 5: The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA ]]; - // Create a new cpi context and append the leaf node to the Merkle tree. 
+ + // Step 6: Create a CPI (Cross-Program Invocation) context to modify the Merkle tree let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program + ctx.accounts.compression_program.to_account_info(), // SPL Account Compression program Modify { - authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified - noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data + authority: ctx.accounts.tree_authority.to_account_info(), // The PDA authority for the Merkle tree + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to modify + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program for logging data }, - signer_seeds, // The seeds for pda signing + signers_seeds, // Seeds for PDAs that will sign the transaction ); - // CPI to append the leaf node to the Merkle tree + + // Step 7: Append the leaf node to the Merkle tree using CPI append(cpi_ctx, leaf_node)?; + Ok(()) } @@ -899,42 +1151,42 @@ pub mod compressed_notes { } } ``` -#### 6. Create `update_note` instruction - -The last instruction we'll make is the `update_note` instruction. This should -replace an existing leaf with a new hash representing the new updated note data. - -For this to work, we'll need the following parameters: - -1. `index` - the index of the leaf we are going to update -2. `root` - the root hash of the Merkle tree -3. `old_note` - the string representation of the old note we're updating -4. `new_note` - the string representation of the new note we want to update to - -Remember, the steps here are similar to `append_note`, but with some minor -additions and modifications: - -1. The first step is new. 
We need to first prove that the `owner` calling this - function is the true owner of the leaf at the given index. Since the data is - compressed as a hash on the leaf, we can't simply compare the `owner` public - key to a stored value. Instead, we need to compute the previous hash using - the old note data and the `owner` listed in the account validation struct. We - then build and issue a CPI to the State Compression Program's `verify_leaf` - instruction using our computed hash. -2. This step is the same as the first step from creating the `append_note` - instruction. Use the `hashv` function from the `keccak` crate to hash the new - note and its owner, each as their corresponding byte representation. -3. This step is the same as the second step from creating the `append_note` - instruction. Create an instance of the `NoteLog` struct using the hash from - step 2, the owner's public key, and the new note as a string. Then call - `wrap_application_data_v1` to issue a CPI to the Noop program, passing the - instance of `NoteLog` -4. This step is slightly different than the last step from creating the - `append_note` instruction. Build and issue a CPI to the State Compression - Program's `replace_leaf` instruction. This uses the old hash, the new hash, - and the leaf index to replace the data of the leaf at the given index with - the new hash. Just as before, this requires the Merkle tree address and the - tree authority bump as signature seeds. +#### 6. Create `update_note` Instruction Handler + +The final instruction we’ll implement is `update_note`, which will replace an +existing leaf with a new hash that represents the updated note data. + +To perform this update, you’ll need the following parameters: + +1. **Index**: The index of the leaf to be updated. +2. **Root**: The root hash of the Merkle tree. +3. **Old Note**: The string representation of the note that is being updated. +4. **New Note**: The string representation of the updated note. 
+ +The process for this instruction is similar to `append_note`, with some +additional steps: + +1. **Verify Ownership**: Before updating, prove that the `owner` executing this + instruction is the rightful owner of the leaf at the specified index. Since + the leaf data is compressed as a hash, you can’t directly compare the + `owner`'s public key. Instead, compute the previous hash using the old note + data and the `owner` from the account validation struct. Then, use this + computed hash to build and issue a CPI to the State Compression Program’s + `verify_leaf` instruction. + +2. **Hash the New Data**: Hash the new note and the owner’s public key using the + `hashv` function from the `keccak` crate, converting each to its byte + representation. + +3. **Log the New Data**: Create a `NoteLog` instance with the new hash from step + 2, the owner’s public key, and the new note. Call `wrap_application_data_v1` + to issue a CPI to the Noop program with this `NoteLog` instance, ensuring the + updated note data is available to clients. + +4. **Replace the Leaf**: Build and issue a CPI to the State Compression + Program’s `replace_leaf` instruction. This will replace the old hash with the + new hash at the specified leaf index. Ensure the Merkle tree address and the + tree authority bump are included as signature seeds. ```rust #[program] @@ -943,85 +1195,97 @@ pub mod compressed_notes { //... - pub fn update_note( + /// Instruction to update a note in the Merkle tree. + /// + /// # Arguments + /// * `ctx` - The context containing accounts needed for this transaction. + /// * `index` - The index of the note to update in the Merkle tree. + /// * `root` - The root hash of the Merkle tree for verification. + /// * `old_note` - The current note to be updated. + /// * `new_note` - The new note that will replace the old one. + /// + /// # Returns + /// * `Result<()>` - Returns a success or error result. 
+ pub fn update_note( ctx: Context, index: u32, root: [u8; 32], old_note: String, new_note: String, ) -> Result<()> { - let old_leaf = - keccak::hashv(&[old_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + // Step 1: Hash the old note to generate the corresponding leaf node + let old_leaf = keccak::hashv(&[old_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + // Step 2: Get the address of the Merkle tree account let merkle_tree = ctx.accounts.merkle_tree.key(); - // Define the seeds for pda signing - let signer_seeds: &[&[&[u8]]] = &[&[ + // Step 3: The seeds for PDAs signing + let signers_seeds: &[&[&[u8]]] = &[&[ merkle_tree.as_ref(), // The address of the Merkle tree account as a seed - &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the pda + &[*ctx.bumps.get("tree_authority").unwrap()], // The bump seed for the PDA ]]; - // Verify Leaf - { - if old_note == new_note { - msg!("Notes are the same!"); - return Ok(()); - } - - let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program - VerifyLeaf { - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified - }, - signer_seeds, // The seeds for pda signing - ); - // Verify or Fails - verify_leaf(cpi_ctx, root, old_leaf, index)?; + // Step 4: Check if the old note and new note are the same + if old_note == new_note { + msg!("Notes are the same!"); + return Ok(()); } - let new_leaf = - keccak::hashv(&[new_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); + // Step 5: Verify the leaf node in the Merkle tree + let verify_cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + VerifyLeaf { + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + }, + signers_seeds, // The seeds for PDAs signing + ); + // Verify or 
fail + verify_leaf(verify_cpi_ctx, root, old_leaf, index)?; + + // Step 6: Hash the new note to create the new leaf node + let new_leaf = keccak::hashv(&[new_note.as_bytes(), ctx.accounts.owner.key().as_ref()]).to_bytes(); - // Log out for indexers + // Step 7: Create a NoteLog entry for the new note let note_log = NoteLog::new(new_leaf.clone(), ctx.accounts.owner.key().clone(), new_note); - // Log the "note log" data using noop program + + // Step 8: Log the NoteLog data using the Noop program wrap_application_data_v1(note_log.try_to_vec()?, &ctx.accounts.log_wrapper)?; - // replace leaf - { - let cpi_ctx = CpiContext::new_with_signer( - ctx.accounts.compression_program.to_account_info(), // The spl account compression program - Modify { - authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA - merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified - noop: ctx.accounts.log_wrapper.to_account_info(), // The noop program to log data - }, - signer_seeds, // The seeds for pda signing - ); - // CPI to append the leaf node to the Merkle tree - replace_leaf(cpi_ctx, root, old_leaf, new_leaf, index)?; - } + // Step 9: Prepare to replace the old leaf node with the new one in the Merkle tree + let modify_cpi_ctx = CpiContext::new_with_signer( + ctx.accounts.compression_program.to_account_info(), // The SPL account compression program + Modify { + authority: ctx.accounts.tree_authority.to_account_info(), // The authority for the Merkle tree, using a PDA + merkle_tree: ctx.accounts.merkle_tree.to_account_info(), // The Merkle tree account to be modified + noop: ctx.accounts.log_wrapper.to_account_info(), // The Noop program to log data + }, + signers_seeds, // The seeds for PDAs signing + ); + + // Step 10: Replace the old leaf node with the new leaf node in the Merkle tree + replace_leaf(modify_cpi_ctx, root, old_leaf, new_leaf, index)?; Ok(()) } } ``` -#### 7. 
Client test setup +#### 7. Client Test Setup -We're going to write a few tests to ensure that our program works as expected. -First, let's do some setup. +To ensure our program functions correctly, we’ll set up and write some tests. +Here’s what you need to do for the setup: -We'll be using the `@solana/spl-account-compression` package. Go ahead and -install it: +1. **Install Dependencies**: We’ll be using the + `@solana/spl-account-compression` package for our tests. Install it using the + following command: ```bash yarn add @solana/spl-account-compression ``` -Next, we're going to give you the contents of a utility file we've created to -make testing easier. Create a `utils.ts` file in the `tests` directory, add in -the below, then we'll explain it. +2. **Create Utility File**: To simplify testing, we’ve provided a utility file. + Create a `utils.ts` file in the `tests` directory and add the provided + contents. We’ll go over the details of this file shortly. ```typescript import { @@ -1130,36 +1394,41 @@ export async function getNoteLog(connection: Connection, txSignature: string) { } ``` -There are 3 main things in the above file: +The `utils.ts` file contains three key components: + +1. **`NoteLog` Class**: This class represents the note log that we’ll extract + from the Noop program logs. It also includes the Borsh schema, named + `NoteLogBorshSchema`, which is used for deserialization. -1. `NoteLog` - a class representing the note log we'll find in the Noop program - logs. We've also added the borsh schema as `NoteLogBorshSchema` for - deserialization. -2. `getHash` - a function that creates a hash of the note and note owner so we - can compare it to what we find on the Merkle tree -3. `getNoteLog` - a function that looks through the provided transaction's logs, - finds the Noop program logs, then deserializes and returns the corresponding - Note log. +2. 
**`getHash` Function**: This function generates a hash from the note and its +   owner, allowing us to compare it against the data in the Merkle tree.  -#### 8. Write client tests +3. **`getNoteLog` Function**: This function searches through the transaction +   logs to locate the Noop program logs, then deserializes and retrieves the +   corresponding `NoteLog`.  -Now that we've got our packages installed and utility file ready, let's dig into -the tests themselves. We're going to create four of them: +#### 8. Write Client Tests  -1. Create Note Tree - this will create the Merkle tree we'll be using to store -   note hashes -2. Add Note - this will call our `append_note` instruction -3. Add Max Size Note - this will call our `append_note` instruction with a note -   that maxes out the 1232 bytes allowed in a single transaction -4. Update First Note - this will call our `update_note` instruction to modify -   the first note we added +With our packages and utility file set up, we’re ready to dive into writing the +tests. We will create four tests for our program:  -The first test is mostly just for setup. In the last three tests, we'll be -asserting each time that the note hash on the tree matches what we would expect -given the note text and signer. +1. **Create Note Tree**: This test will initialize the Merkle tree for storing +   note hashes. +2. **Add Note**: This test will invoke the `append_note` instruction to add a +   note to the tree. +3. **Adds max size note to the Merkle tree**: This test will also use the +   `append_note` instruction, but with a note that reaches the maximum allowable +   size of 1232 bytes in a single transaction. +4. **Updates the first note in the Merkle tree**: This test will use the +   `update_note` instruction to modify the first note that was added.  -Let's start with our imports. There are quite a few from Anchor, -`@solana/web3.js`, `@solana/spl-account-compression`, and our own utils file. +The first test is mainly for setup purposes. 
For the remaining three tests, we +will check that the note hash in the Merkle tree matches the expected value +based on the note content and the signer. + +We’ll start by setting up our imports. This includes a variety of components +from Anchor, `@solana/web3.js`, `@solana/spl-account-compression`, and our own +utility functions. ```typescript import * as anchor from "@coral-xyz/anchor"; @@ -1183,9 +1452,13 @@ import { getHash, getNoteLog } from "./utils"; import { assert } from "chai"; ``` -Next, we'll want to set up the state variables we'll be using throughout our -tests. This includes the default Anchor setup as well as generating a Merkle -tree keypair, the tree authority, and some notes. +Next, we’ll set up the state variables needed for our tests. This setup will +include: + +1. **Default Anchor Setup**: Configure the basic environment for Anchor testing. +2. **Merkle Tree Keypair**: Generate a keypair for the Merkle tree. +3. **Tree Authority**: Create a keypair for the authority of the Merkle tree. +4. **Notes**: Define some sample notes to use in the tests. 
```typescript describe("compressed-notes", () => { @@ -1193,7 +1466,7 @@ describe("compressed-notes", () => { anchor.setProvider(provider); const connection = new Connection( provider.connection.rpcEndpoint, - "confirmed", // has to be confirmed for some of the methods below + "confirmed", ); const wallet = provider.wallet as anchor.Wallet; @@ -1203,7 +1476,6 @@ describe("compressed-notes", () => { const merkleTree = Keypair.generate(); // Derive the PDA to use as the tree authority for the Merkle tree account - // This is a PDA derived from the Note program, which allows the program to sign for appends instructions to the tree const [treeAuthority] = PublicKey.findProgramAddressSync( [merkleTree.publicKey.toBuffer()], program.programId, @@ -1213,19 +1485,23 @@ describe("compressed-notes", () => { const secondNote = "0".repeat(917); const updatedNote = "updated note"; - // TESTS GO HERE + describe("Merkle Tree Operations", () => { + // Tests will go here + }); }); ``` -Finally, let's start with the tests themselves. First the `Create Note Tree` -test. This test will do two things: +Now, let’s dive into the `Create Note Tree` test. This test will accomplish two +key tasks: -1. Allocate a new account for the Merkle tree with a max depth of 3, max buffer - size of 8, and canopy depth of 0 -2. Initialize this new account using our program's `createNoteTree` instruction +1. **Allocate a New Merkle Tree Account**: Create a new account for the Merkle + tree, specifying a max depth of 3, a max buffer size of 8, and a canopy depth + of 0. +2. **Initialize the Account**: Use our program’s `createNoteTree` instruction to + set up the newly allocated Merkle tree account. 
```typescript -it("Create Note Tree", async () => { +it("creates a new note tree", async () => { const maxDepthSizePair: ValidDepthSizePair = { maxDepth: 3, maxBufferSize: 8, @@ -1233,7 +1509,7 @@ it("Create Note Tree", async () => { const canopyDepth = 0; - // instruction to create new account with required space for tree + // Instruction to create a new account with the required space for the tree const allocTreeIx = await createAllocTreeIx( connection, merkleTree.publicKey, @@ -1242,12 +1518,13 @@ it("Create Note Tree", async () => { canopyDepth, ); - // instruction to initialize the tree through the Note program + // Instruction to initialize the tree through the Note program const ix = await program.methods .createNoteTree(maxDepthSizePair.maxDepth, maxDepthSizePair.maxBufferSize) .accounts({ + owner: wallet.publicKey, merkleTree: merkleTree.publicKey, - treeAuthority: treeAuthority, + treeAuthority, logWrapper: SPL_NOOP_PROGRAM_ID, compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, }) @@ -1255,114 +1532,118 @@ it("Create Note Tree", async () => { const tx = new Transaction().add(allocTreeIx, ix); await sendAndConfirmTransaction(connection, tx, [wallet.payer, merkleTree]); -}); -``` -Next, we'll create the `Add Note` test. It should call `append_note` with -`firstNote`, then check that the onchain hash matches our computed hash and that -the note log matches the text of the note we passed into the instruction. 
+ // Fetch the Merkle tree account to confirm it’s initialized + const merkleTreeAccount = + await ConcurrentMerkleTreeAccount.fromAccountAddress( + connection, + merkleTree.publicKey, + ); + assert(merkleTreeAccount, "Merkle tree should be initialized"); +}); -```typescript -it("Add Note", async () => { +it("adds a note to the Merkle tree", async () => { const txSignature = await program.methods .appendNote(firstNote) .accounts({ + owner: wallet.publicKey, merkleTree: merkleTree.publicKey, - treeAuthority: treeAuthority, + treeAuthority, logWrapper: SPL_NOOP_PROGRAM_ID, compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, }) .rpc(); const noteLog = await getNoteLog(connection, txSignature); - const hash = getHash(firstNote, provider.publicKey); + const hash = getHash(firstNote, wallet.publicKey); - assert(hash === Buffer.from(noteLog.leafNode).toString("hex")); - assert(firstNote === noteLog.note); + assert( + hash === Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match", + ); + assert(firstNote === noteLog.note, "Note should match the appended note"); }); -``` - -Next, we'll create the `Add Max Size Note` test. It is the same as the previous -test, but with the second note. 
-```typescript -it("Add Max Size Note", async () => { - // Size of note is limited by max transaction size of 1232 bytes, minus additional data required for the instruction +it("adds max size note to the Merkle tree", async () => { const txSignature = await program.methods .appendNote(secondNote) .accounts({ + owner: wallet.publicKey, merkleTree: merkleTree.publicKey, - treeAuthority: treeAuthority, + treeAuthority, logWrapper: SPL_NOOP_PROGRAM_ID, compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, }) .rpc(); const noteLog = await getNoteLog(connection, txSignature); - const hash = getHash(secondNote, provider.publicKey); + const hash = getHash(secondNote, wallet.publicKey); - assert(hash === Buffer.from(noteLog.leafNode).toString("hex")); - assert(secondNote === noteLog.note); + assert( + hash === Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match", + ); + assert( + secondNote === noteLog.note, + "Note should match the appended max size note", + ); }); -``` - -Lastly, we'll create the `Update First Note` test. This is slightly more complex -than adding a note. We'll do the following: -1. Get the Merkle tree root as it's required by the instruction. -2. Call the `update_note` instruction of our program, passing in the index 0 - (for the first note), the Merkle tree root, the first note, and the updated - data. Remember, it needs the first note and the root because the program must - verify the entire proof path for the note's leaf before it can be updated. 
- -```typescript -it("Update First Note", async () => { +it("updates the first note in the Merkle tree", async () => { const merkleTreeAccount = await ConcurrentMerkleTreeAccount.fromAccountAddress( connection, merkleTree.publicKey, ); - - const rootKey = merkleTreeAccount.tree.changeLogs[0].root; - const root = Array.from(rootKey.toBuffer()); + const root = merkleTreeAccount.getCurrentRoot(); const txSignature = await program.methods .updateNote(0, root, firstNote, updatedNote) .accounts({ + owner: wallet.publicKey, merkleTree: merkleTree.publicKey, - treeAuthority: treeAuthority, + treeAuthority, logWrapper: SPL_NOOP_PROGRAM_ID, compressionProgram: SPL_ACCOUNT_COMPRESSION_PROGRAM_ID, }) .rpc(); const noteLog = await getNoteLog(connection, txSignature); - const hash = getHash(updatedNote, provider.publicKey); + const hash = getHash(updatedNote, wallet.publicKey); - assert(hash === Buffer.from(noteLog.leafNode).toString("hex")); - assert(updatedNote === noteLog.note); + assert( + hash === Buffer.from(noteLog.leafNode).toString("hex"), + "Leaf node hash should match after update", + ); + assert( + updatedNote === noteLog.note, + "Updated note should match the logged note", + ); }); ``` -That's it, congrats! Go ahead and run `anchor test` and you should get four -passing tests. +That’s a wrap—congratulations! Run `anchor test`, and you should see all four +tests passing. -If you're running into issues, feel free to go back through some of the demo or -look at the full solution code in the +If you encounter any issues, don’t hesitate to revisit the demo or check out the +complete solution code in the [Compressed Notes repository](https://github.com/unboxed-software/anchor-compressed-notes). -## Challenge +### Challenge -Now that you've practiced the basics of state compression, add a new instruction -to the Compressed Notes program. This new instruction should allow users to -delete an existing note. 
keep in mind that you can't remove a leaf from the -tree, so you'll need to decide what “deleted” looks like for your program. Good -luck! +Now that you’ve got the hang of state compression, it’s time to add a new +feature to the Compressed Notes program. Your task is to implement an +instruction that allows users to delete an existing note. Keep in mind that you +can’t physically remove a leaf from the Merkle tree, so you’ll need to come up +with a method to signify that a note has been deleted.  -If you'd like a very simple example of a delete function, check out the -[`solution` branch on GitHub](https://github.com/Unboxed-Software/anchor-compressed-notes/tree/solution). +Good luck, and happy coding! + +For a straightforward example of how to implement a delete function, check out +the +[`main` branch on GitHub](https://github.com/Unboxed-Software/anchor-compressed-notes/tree/main).  -Push your code to GitHub and -[tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=60f6b072-eaeb-469c-b32e-5fea4b72d1d1)! +Push your code to GitHub and [let us know what you think of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=60f6b072-eaeb-469c-b32e-5fea4b72d1d1)! diff --git a/content/courses/state-compression/metadata.yml b/content/courses/state-compression/metadata.yml index a95b504a7..3a86b52a2 100644 --- a/content/courses/state-compression/metadata.yml +++ b/content/courses/state-compression/metadata.yml @@ -6,7 +6,3 @@ lessons: - compressed-nfts - generalized-state-compression priority: 65 -# Uses deprecated @metaplex-foundation/js library for NFTs -# which also uses old RPC methods and no longer functions. 
-# TODO: Superteam to update -isHidden: true diff --git a/content/courses/token-extensions/close-mint.md b/content/courses/token-extensions/close-mint.md index 42a0df6eb..8c3cd9caa 100644 --- a/content/courses/token-extensions/close-mint.md +++ b/content/courses/token-extensions/close-mint.md @@ -198,10 +198,10 @@ await setAuthority( ## Lab -In this lab, we'll create a mint with the `close mint` extension. We will then -mint some of the tokens and see what happens when we try to close it with a -non-zero supply (hint, the close transaction will fail). Lastly, we will burn -the supply and close the account. +In this lab, we'll create a token mint account with the `close mint` extension. +We will then mint some of the tokens and see what happens when we try to close +it with a non-zero supply (hint, the close transaction will fail). Lastly, we +will burn the supply and close the account. ### 1. Getting Started @@ -304,6 +304,14 @@ running `solana config get` in your terminal. And then go to address. You can get your address from running `solana address` in your terminal. +For example, assuming `keypairPath` is `/home/.config/solana/id.json` + +```typescript +const payer = initializeKeypair(connection, { + keypairPath: "/home/.config/solana/id.json", +}); +``` + ### 3. Create a mint with close authority Let's create a closable mint by creating the function `createClosableMint` in a @@ -399,7 +407,13 @@ export async function createClosableMint( ``` Now let's call this function in `src/index.ts`. First you'll need to import our -new function. Then paste the following under the right comment section: +new function by uncommenting the 3rd line. 
+ +```ts +import { createClosableMint } from "./create-mint"; +``` + +Then paste the following under the right comment section: ```ts // CREATE A MINT WITH CLOSE AUTHORITY @@ -470,7 +484,7 @@ Underneath the minting functions, add the following code block: /** * Get mint information to verify supply */ -const mintInfo = await getMint( +let mintInfo = await getMint( connection, mintKeypair.publicKey, "finalized", @@ -591,7 +605,7 @@ Putting this all together we get: ```ts // CLOSE MINT -const mintInfo = await getMint( +mintInfo = await getMint( connection, mintKeypair.publicKey, "finalized", diff --git a/content/courses/token-extensions/transfer-fee.md b/content/courses/token-extensions/transfer-fee.md index 16cd7ff93..f7d574865 100644 --- a/content/courses/token-extensions/transfer-fee.md +++ b/content/courses/token-extensions/transfer-fee.md @@ -159,7 +159,7 @@ There are a couple of notes when transferring tokens with the `transfer fee` extension. First, the recipient is the one who "pays" for the fee. If I send 100 tokens -with basis points of 50 (5%), the recipient will receive 95 tokens (five +with basis points of 500 (5%), the recipient will receive 95 tokens (five withheld) Second, the fee is calculated not by the tokens sent, but the smallest unit of @@ -167,9 +167,9 @@ said token. In Solana programming, we always specify amounts to be transferred, minted or burned in their smallest unit. To send one SOL to someone, we actually send `1 * 10 ^ 9` lamports. Another way to look at it is if you wanted to send one US dollar, you're actually sending 100 pennies. Let's make this dollar a -token with a 50 basis points (5%) transfer fee. Sending one dollar, would result -in a five cent fee. Now let's say we have a max fee of 10 cents, this will -always be the highest fee, even if we send $10,000. +token with a 500 basis points (5%) transfer fee. Sending one dollar, would +result in a five cent fee. 
Now let's say we have a max fee of 10 cents, this +will always be the highest fee, even if we send $10,000. The calculation can be summed up like this: diff --git a/content/courses/tokens-and-nfts/metadata.yml b/content/courses/tokens-and-nfts/metadata.yml index 07e368033..ec3629722 100644 --- a/content/courses/tokens-and-nfts/metadata.yml +++ b/content/courses/tokens-and-nfts/metadata.yml @@ -6,7 +6,3 @@ lessons: - token-program-advanced - nfts-with-metaplex priority: 10 -# Uses deprecated @metaplex-foundation/js library for NFTs -# which also uses old RPC methods and no longer functions. -# TODO: Superteam to update -isHidden: true diff --git a/content/courses/tokens-and-nfts/token-program-advanced.md b/content/courses/tokens-and-nfts/token-program-advanced.md index 16c827c38..b979dd803 100644 --- a/content/courses/tokens-and-nfts/token-program-advanced.md +++ b/content/courses/tokens-and-nfts/token-program-advanced.md @@ -1,5 +1,5 @@ --- -title: Token burning and Delegation +title: Token Burning and Delegation objectives: - Understand why and how to burn tokens - Allow a token holder to allocate a limited amount of tokens to another @@ -8,20 +8,34 @@ description: "How to burn tokens, and approve/revoke token delegations on Solana." --- +### Summary + +- **Burning tokens** reduces the total supply of a token by removing them from + circulation. +- **Approving a delegate**, allows another account to transfer or burn a + specified amount of tokens from a token account while retaining original + account ownership. +- **Revoking a delegate**, removes their authority to act on behalf of the token + account owner. +- Each of these operations is facilitated through the `spl-token` library, + utilizing specific functions for each action. + ### Lesson -Finally, we'll cover burning tokens, and delegation. You may not use these in -your own application, so if you're really excited about NFTs, feel free to skip -to +In this lesson, we'll cover burning tokens and delegation. 
You may not have a +need for these in your own application, so if you're more interested in NFTs, +feel free to skip ahead to [creating NFTs with Metaplex](/content/courses/tokens-and-nfts/nfts-with-metaplex.md)! -### Burn Tokens +#### Burn Tokens Burning tokens is the process of decreasing the token supply of a given token mint. Burning tokens removes the tokens from the given token account and from broader circulation. -To burn tokens using the `spl-token` library, use the `burn` function. +To burn tokens using the `spl-token` library, use the +[`burn()`](https://solana-labs.github.io/solana-program-library/token/js/functions/burn.html#burn) +function. ```typescript import { burn } from "@solana/spl-token"; @@ -38,20 +52,22 @@ const transactionSignature = await burn( ); ``` -The `burn` function requires the following arguments: +The `burn()` function requires the following arguments: -- `connection` - the JSON-RPC connection to the cluster -- `payer` - the account of the payer for the transaction -- `account` - the token account to burn tokens from -- `mint` - the token mint associated with the token account -- `owner` - the account of the owner of the token account -- `amount` - the amount of tokens to burn +- `connection`: JSON-RPC connection to the cluster. +- `payer`: The account responsible for paying transaction fees. +- `account`: The token account from which tokens will be burned. +- `mint`: The token mint associated with the token account. +- `owner`: The owner of the token account. +- `amount`: The number of tokens to burn. -Under the hood, the `burn` function creates a transaction with instructions -obtained from the `createBurnInstruction` function: +Under the hood, the `burn()` function creates a transaction using the +instruction obtained from +[`createBurnInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createBurnInstruction.html#createBurnInstruction) +function. 
```typescript -import { PublicKey, Transaction } from "@solana/web3"; +import { PublicKey, Transaction } from "@solana/web3.js"; import { createBurnInstruction } from "@solana/spl-token"; async function buildBurnTransaction( @@ -68,16 +84,16 @@ async function buildBurnTransaction( } ``` -### Approve Delegate +#### Approve Delegate Approving a delegate is the process of authorizing another account to transfer -or burn tokens from a token account. When using a delegate, the authority over -the token account remains with the original owner. The maximum amount of tokens -a delegate may transfer or burn is specified at the time the owner of the token -account approves the delegate. Note that there can only be one delegate account -associated with a token account at any given time. +or burn tokens from a token account. The authority over the token account +remains with the original owner. The maximum number of tokens a delegate can +transfer or burn is defined when the owner approves the delegate. Only one +delegate can be associated with a token account at a time. -To approve a delegate using the `spl-token` library, you use the `approve` +To approve a delegate using the `spl-token` library, use the +[`approve()`](https://solana-labs.github.io/solana-program-library/token/js/functions/approve.html#approve) function. ```typescript @@ -91,21 +107,23 @@ const transactionSignature = await approve( ); ``` -The `approve` function returns a `TransactionSignature` that can be viewed on -Solana Explorer. The `approve` function requires the following arguments: +The `approve()` function returns a `TransactionSignature` that can be viewed on +Solana Explorer. 
It requires the following arguments: -- `connection` - the JSON-RPC connection to the cluster -- `payer` - the account of the payer for the transaction -- `account` - the token account to delegate tokens from -- `delegate` - the account the owner is authorizing to transfer or burn tokens -- `owner` - the account of the owner of the token account -- `amount` - the maximum number of tokens the delegate may transfer or burn +- `connection`: The JSON-RPC connection to the cluster. +- `payer`: The account of the payer for the transaction. +- `account`: The token account to delegate tokens from. +- `delegate`: The account authorized to transfer or burn tokens. +- `owner`: The account of the owner of the token account. +- `amount`: The maximum number of tokens the delegate can transfer or burn. -Under the hood, the `approve` function creates a transaction with instructions -obtained from the `createApproveInstruction` function: +Under the hood, the `approve()` function creates a transaction with instructions +obtained from the +[`createApproveInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createApproveInstruction.html#createApproveInstruction) +function. ```typescript -import { PublicKey, Transaction } from "@solana/web3"; +import { PublicKey, Transaction } from "@solana/web3.js"; import { createApproveInstruction } from "@solana/spl-token"; async function buildApproveTransaction( @@ -122,14 +140,15 @@ async function buildApproveTransaction( } ``` -### Revoke Delegate +#### Revoke Delegate -A previously approved delegate for a token account can be later revoked. Once a -delegate is revoked, the delegate can no longer transfer tokens from the owner's -token account. Any remaining amount left untransferred from the previously -approved amount can no longer be transferred by the delegate. +A previously approved delegate for a token account can be revoked. 
Once revoked, +the delegate can no longer transfer tokens from the owner's token account. Any +untransferred amount from the previously approved tokens will no longer be +accessible by the delegate. -To revoke a delegate using the `spl-token` library, you use the `revoke` +To revoke a delegate using the `spl-token` library, use the +[`revoke()`](https://solana-labs.github.io/solana-program-library/token/js/functions/revoke.html#revoke) function. ```typescript @@ -138,20 +157,22 @@ import { revoke } from "@solana/spl-token"; const transactionSignature = await revoke(connection, payer, account, owner); ``` -The `revoke` function returns a `TransactionSignature` that can be viewed on -Solana Explorer. The `revoke` function requires the following arguments: +The `revoke()` function returns a `TransactionSignature` that can be viewed on +Solana Explorer. This function requires the following arguments: -- `connection` - the JSON-RPC connection to the cluster -- `payer` - the account of the payer for the transaction -- `account` - the token account to revoke the delegate authority from -- `owner` - the account of the owner of the token account +- `connection`: The JSON-RPC connection to the cluster. +- `payer`: The account responsible for paying the transaction fees. +- `account`: The token account from which to revoke the delegate authority. +- `owner`: The account of the owner of the token account. 
-Under the hood, the `revoke` function creates a transaction with instructions -obtained from the `createRevokeInstruction` function: +Under the hood, the `revoke()` function generates a transaction using the +instructions from the +[`createRevokeInstruction()`](https://solana-labs.github.io/solana-program-library/token/js/functions/createRevokeInstruction.html#createRevokeInstruction) +function: ```typescript -import { PublicKey, Transaction } from "@solana/web3"; -import { revoke } from "@solana/spl-token"; +import { PublicKey, Transaction } from "@solana/web3.js"; +import { createRevokeInstruction } from "@solana/spl-token"; async function buildRevokeTransaction( account: PublicKey, @@ -167,114 +188,187 @@ async function buildRevokeTransaction( ### Lab -This lab extends the lab from the -[previous chapter](/content/courses/tokens-and-nfts/token-program.md). +This lab extends the concepts covered in the previous lesson on the +[Token Program](/content/courses/tokens-and-nfts/token-program.md). -#### 1. Delegating tokens +#### 1. Delegating Tokens -Let's use `approve` from `spl-token` to authorize a delegate to transfer or burn -up to 50 tokens from our token account. +We will use the `approve()` function from the `spl-token` library to authorize a +delegate to transfer or burn up to 50 tokens from our token account. -Just like -[Transferring Tokens](/content/courses/tokens-and-nfts/token-program.md) in the -previous lab, you can -[add a second account on devnet](/content/courses/intro-to-solana/intro-to-cryptography.md) -if you like, or find a friend who has a devnet account! +Similar to the process of +[Transferring Tokens](/content/courses/tokens-and-nfts/token-program.md#transferring-tokens) +in the previous lab, you can +[add a second account on Devnet](/content/courses/intro-to-solana/intro-to-cryptography.md) +if desired or collaborate with a friend who has a Devnet account. 
-Create a new file `delegate-tokens.ts` +Create a new file named `delegate-tokens.ts`. For this example, we are using the +System Program ID as a delegate for demonstration, but you can use an actual +address that you want to delegate. -```typescript +```typescript filename="delegate-tokens.ts" import "dotenv/config"; import { getExplorerLink, getKeypairFromEnvironment, } from "@solana-developers/helpers"; -import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; import { - approve, - getOrCreateAssociatedTokenAccount, - revoke, -} from "@solana/spl-token"; - -const connection = new Connection(clusterApiUrl("devnet")); - + Connection, + PublicKey, + clusterApiUrl, + SystemProgram, +} from "@solana/web3.js"; +import { approve, getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; + +const DEVNET_URL = clusterApiUrl("devnet"); +const TOKEN_DECIMALS = 2; +const DELEGATE_AMOUNT = 50; +const MINOR_UNITS_PER_MAJOR_UNITS = 10 ** TOKEN_DECIMALS; + +// Initialize connection and load user keypair +const connection = new Connection(DEVNET_URL); const user = getKeypairFromEnvironment("SECRET_KEY"); -console.log( - `🔑 Loaded our keypair securely, using an env file! Our public key is: ${user.publicKey.toBase58()}`, -); +console.log(`🔑 Loaded keypair. Public key: ${user.publicKey.toBase58()}`); -// Add the delegate public key here. 
-const delegate = new PublicKey("YOUR_DELEGATE_HERE"); +// Replace this with your actual address +// For this example, we will be using System Program's ID as a delegate +const delegatePublicKey = new PublicKey(SystemProgram.programId); -// Substitute in your token mint account -const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); +// Substitute your token mint address +const tokenMintAddress = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); -// Get or create the source and destination token accounts to store this token -const sourceTokenAccount = await getOrCreateAssociatedTokenAccount( - connection, - user, - tokenMintAccount, - user.publicKey, -); - -// Our token has two decimal places -const MINOR_UNITS_PER_MAJOR_UNITS = Math.pow(10, 2); +try { + // Get or create the user's token account + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAddress, + user.publicKey, + ); -const approveTransactionSignature = await approve( - connection, - user, - sourceTokenAccount.address, - delegate, - user.publicKey, - 50 * MINOR_UNITS_PER_MAJOR_UNITS, -); + // Approve the delegate + const approveTransactionSignature = await approve( + connection, + user, + userTokenAccount.address, + delegatePublicKey, + user.publicKey, + DELEGATE_AMOUNT * MINOR_UNITS_PER_MAJOR_UNITS, + ); -console.log( - `Approve Delegate Transaction: ${getExplorerLink( + const explorerLink = getExplorerLink( "transaction", approveTransactionSignature, "devnet", - )}`, -); + ); + + console.log(`✅ Delegate approved. Transaction: ${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address obtained +from the previous lesson +[Token Program](/content/courses/tokens-and-nfts/token-program.md#create-the-token-mint). + +Run the script using `npx esrun delegate-tokens.ts`. 
You should see: + +```bash +🔑 Loaded keypair. Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Delegate approved. Transaction: https://explorer.solana.com/tx/21tX6L7zk5tkHeoD7V1JYYW25VAWRfQrJPnxDcMXw94yuFbHxX4UZEgS6k6co9dBWe7PqFoMoWEVfbVA92Dk4xsQ?cluster=devnet ``` +Open the Explorer link, and you will see the approval information. + +![Delegate Tokens](/public/assets/courses/unboxed/delegate-token.png) + #### 2. Revoke Delegate -Lets revoke the `delegate` using the `spl-token` library's `revoke` function. +Let's revoke the `delegate` using the `spl-token` library's `revoke()` function. -Revoke will set delegate for the token account to null and reset the delegated -amount to 0. +Revoke will set the delegate for the token account to null and reset the +delegated amount to 0. -All we will need for this function is the token account and user. After the +Create a new file `revoke-approve-tokens.ts`. -```typescript -const revokeTransactionSignature = await revoke( - connection, - user, - sourceTokenAccount.address, - user.publicKey, -); +```typescript filename="revoke-approve-tokens.ts" +import "dotenv/config"; +import { + getExplorerLink, + getKeypairFromEnvironment, +} from "@solana-developers/helpers"; +import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; +import { revoke, getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; -const connection = new Connection(clusterApiUrl("devnet")); +const DEVNET_URL = clusterApiUrl("devnet"); +// Substitute your token mint address +const TOKEN_MINT_ADDRESS = "YOUR_TOKEN_MINT_ADDRESS_HERE"; + +const connection = new Connection(DEVNET_URL); +const user = getKeypairFromEnvironment("SECRET_KEY"); + +console.log(`🔑 Loaded keypair. 
Public key: ${user.publicKey.toBase58()}`); + +try { + const tokenMintAddress = new PublicKey(TOKEN_MINT_ADDRESS); + + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAddress, + user.publicKey, + ); + + const revokeTransactionSignature = await revoke( + connection, + user, + userTokenAccount.address, + user.publicKey, + ); + + const explorerLink = getExplorerLink( "transaction", revokeTransactionSignature, "devnet", - )}`, -); + ); + + console.log(`✅ Revoke Delegate Transaction: ${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} +``` + +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address obtained +from the previous lesson +[Token Program](/content/courses/tokens-and-nfts/token-program.md#create-the-token-mint). + +Run the script using `npx esrun revoke-approve-tokens.ts`. You should see: + +```bash +🔑 Loaded keypair. Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Revoke Delegate Transaction: https://explorer.solana.com/tx/YTc2Vd41SiGiHf3iEPkBH3y164fMbV2TSH2hbe7WypT6K6Q2b3f31ryFWhypmBK2tXmvGYjXeYbuwxHeJvnZZX8?cluster=devnet ``` +Open the Explorer link, and you will see the revoke information. + +![Revoke Approve Tokens](/public/assets/courses/unboxed/revoke-approve-tokens.png) + #### 3. Burn Tokens Finally, let's remove some tokens from circulation by burning them. -Use the `spl-token` library's `burn` function to remove half of your tokens from -circulation. +Use the `spl-token` library's `burn()` function to remove some of your tokens +from circulation. Now, call this function to burn 5 of the user's tokens. -Now call this new function in `main` to burn 25 of the user's tokens. +Create a new file `burn-tokens.ts`. 
-```typescript +```typescript filename="burn-tokens.ts" import "dotenv/config"; import { getExplorerLink, @@ -283,51 +377,70 @@ import { import { Connection, PublicKey, clusterApiUrl } from "@solana/web3.js"; import { getOrCreateAssociatedTokenAccount, burn } from "@solana/spl-token"; -const connection = new Connection(clusterApiUrl("devnet")); +const DEVNET_URL = clusterApiUrl("devnet"); +const TOKEN_DECIMALS = 2; +const BURN_AMOUNT = 5; +// Substitute your token mint address +const TOKEN_MINT_ADDRESS = "YOUR_TOKEN_MINT_ADDRESS_HERE"; +const connection = new Connection(DEVNET_URL); const user = getKeypairFromEnvironment("SECRET_KEY"); -console.log( - `🔑 Loaded our keypair securely, using an env file! Our public key is: ${user.publicKey.toBase58()}`, -); +console.log(`🔑 Loaded keypair. Public key: ${user.publicKey.toBase58()}`); -// Substitute in your token mint account -const tokenMintAccount = new PublicKey("YOUR_TOKEN_MINT_ADDRESS_HERE"); +try { + const tokenMintAccount = new PublicKey(TOKEN_MINT_ADDRESS); -// Get the account where the user stores these tokens -const sourceTokenAccount = await getOrCreateAssociatedTokenAccount( - connection, - user, - tokenMintAccount, - user.publicKey, -); + const userTokenAccount = await getOrCreateAssociatedTokenAccount( + connection, + user, + tokenMintAccount, + user.publicKey, + ); -// Our token has two decimal places -const MINOR_UNITS_PER_MAJOR_UNITS = Math.pow(10, 2); + const burnAmount = BURN_AMOUNT * 10 ** TOKEN_DECIMALS; -const transactionSignature = await burn( - connection, - user, - sourceTokenAccount.address, - tokenMintAccount, - user, - 25 * MINOR_UNITS_PER_MAJOR_UNITS, -); + const transactionSignature = await burn( + connection, + user, + userTokenAccount.address, + tokenMintAccount, + user, + burnAmount, + ); -console.log( - `Burn Transaction: ${getExplorerLink( + const explorerLink = getExplorerLink( "transaction", transactionSignature, "devnet", - )}`, -); + ); + + console.log(`✅ Burn Transaction: 
${explorerLink}`); +} catch (error) { + console.error( + `Error: ${error instanceof Error ? error.message : String(error)}`, + ); +} ``` -Well done! You've now +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address obtained +from the previous lesson +[Token Program](/content/courses/tokens-and-nfts/token-program.md#create-the-token-mint). + +Run the script using `npx esrun burn-tokens.ts`. You should see: + +```bash +🔑 Loaded keypair. Public key: GprrWv9r8BMxQiWea9MrbCyK7ig7Mj8CcseEbJhDDZXM +✅ Burn Transaction: https://explorer.solana.com/tx/5Ufipgvsi5aLzzcr8QQ7mLXHyCwBDqsPxGTPinvFpjSiARnEDgFiPbD2ZiaDkkmwKDMoQ94bf5uqF2M7wjFWcKuv?cluster=devnet +``` + +Open the Explorer link, and you will see the burn information. + +![Burn Tokens](/public/assets/courses/unboxed/burn-tokens.png) - +Well done! You've now completed the lab. -### Completed the lab? + Push your code to GitHub and [tell us what you thought of this lesson](https://form.typeform.com/to/IPH0UGz7#answers-lesson=72cab3b8-984b-4b09-a341-86800167cfc7)! diff --git a/content/courses/tokens-and-nfts/token-program.md b/content/courses/tokens-and-nfts/token-program.md index feb79adc9..cfcd7e625 100644 --- a/content/courses/tokens-and-nfts/token-program.md +++ b/content/courses/tokens-and-nfts/token-program.md @@ -28,7 +28,7 @@ description: - Creating Token Mints and Token Accounts requires allocating **rent** in SOL. The rent for a Token Account can be refunded when the account is closed. Additionally, tokens created with the - [Token Extensions Program](/developers/courses/token-extensions-for-mints/close-mint) + [Token Extensions Program](/content/courses/token-extensions/close-mint.md) can also close Token Mints. ### Lesson @@ -476,7 +476,7 @@ and `freezeAuthority`. Think of the token mint as the factory that makes tokens. Our `user`, as the `mintAuthority` is the person that runs the factory. 
-```typescript +```typescript filename="create-token-mint.ts" import { createMint } from "@solana/spl-token"; import "dotenv/config"; import { @@ -530,7 +530,7 @@ npm i @metaplex-foundation/mpl-token-metadata@2 Create a new file called `create-token-metadata.ts` -```typescript +```typescript filename="create-token-metadata.ts" // This uses "@metaplex-foundation/mpl-token-metadata@2" to create tokens import "dotenv/config"; import { @@ -617,7 +617,7 @@ const transactionLink = getExplorerLink( "devnet", ); -console.log(`✅ Transaction confirmed, explorer link is: ${transactionLink}!`); +console.log(`✅ Transaction confirmed, explorer link is: ${transactionLink}`); const tokenMintLink = getExplorerLink( "address", @@ -625,9 +625,12 @@ const tokenMintLink = getExplorerLink( "devnet", ); -console.log(`✅ Look at the token mint again: ${tokenMintLink}!`); +console.log(`✅ Look at the token mint again: ${tokenMintLink}`); ``` +Replace `YOUR_TOKEN_MINT_ADDRESS_HERE` with your address of the mint and run the +script using `npx esrun create-token-metadata.ts`. + You'll now see Solana Explorer is updated, showing the token's name and symbol on the mint! @@ -655,7 +658,7 @@ on a wallet and our mint address, making the account if it needs to. Remember to substitute in your token mint address below! -```typescript +```typescript filename="create-token-account.ts" import { getOrCreateAssociatedTokenAccount } from "@solana/spl-token"; import "dotenv/config"; import { @@ -697,7 +700,7 @@ const link = getExplorerLink( console.log(`✅ Created token Account: ${link}`); ``` -Run the script using `npx esrun create-token-mint.ts`. You should see: +Run the script using `npx esrun create-token-account.ts`. You should see: ```bash ✅ Success! Created token account: https://explorer.solana.com/address/CTjoLdEeK8rk4YWYW9ZqACyjHexbYKH3hEoagHxLVEFs?cluster=devnet @@ -707,16 +710,19 @@ Open the token account in Solana Explorer. Look at the owner - it's the account you made the ATA for! 
The balance will be zero, as we haven't sent any tokens there yet. Let's mint some tokens there and fix that! +Remember the address of your token account! We'll use it to mint tokens. + +#### Mint Tokens Now that we have a token mint and a token account, let's mint tokens to the token account. Recall that we set the `user` as the `mintAuthority` for the `mint` we created. -Create a function `mintTokens` that uses the `spl-token` function `mintTo` to -mint tokens: +Create an empty file called `mint-tokens.ts`. Then use the `spl-token` function +`mintTo()` to mint tokens. Remember to substitute in your token mint address and +token account address below! -```typescript +```typescript filename="mint-tokens.ts" import { mintTo } from "@solana/spl-token"; import "dotenv/config"; import { @@ -776,7 +782,11 @@ associated token account - we can just look it up using mint of the token we want to send. Likewise, we can find (or make) an ATA for our recipient to hold this token too. -```typescript +Create an empty file called `transfer-tokens.ts`. Then replace +`YOUR_RECIPIENT_HERE` with your recipient public key and replace +`YOUR_TOKEN_MINT_ADDRESS_HERE` with your token mint address. 
+ +```typescript filename="transfer-tokens.ts" import "dotenv/config"; import { getExplorerLink, @@ -803,7 +813,7 @@ const MINOR_UNITS_PER_MAJOR_UNITS = Math.pow(10, 2); console.log(`💸 Attempting to send 1 token to ${recipient.toBase58()}...`); -// Get or create the source and destination token accounts to store this token +// Get or create the source token account to store this token const sourceTokenAccount = await getOrCreateAssociatedTokenAccount( connection, sender, @@ -811,6 +821,7 @@ const sourceTokenAccount = await getOrCreateAssociatedTokenAccount( sender.publicKey, ); +// Get or create the destination token account to store this token const destinationTokenAccount = await getOrCreateAssociatedTokenAccount( connection, sender, @@ -830,7 +841,13 @@ const signature = await transfer( const explorerLink = getExplorerLink("transaction", signature, "devnet"); -console.log(`✅ Transaction confirmed, explorer link is: ${explorerLink}!`); +console.log(`✅ Transaction confirmed, explorer link is: ${explorerLink}`); +``` + +Run the script using `npx esrun transfer-tokens.ts`. You should see: + +```bash +✅ Transaction confirmed, explorer link is: https://explorer.solana.com/tx/SgV2j2DkaErYf7ERiB11USoZzGqAk8HPEqVJLP8HWdz9M61FSFgyEMXJycHQtfCooCAPBom7Vi3akEAwSUHQUsu?cluster=devnet ``` Open the Explorer link. You see your balance go down, and the recipient's diff --git a/content/guides/games/hello-world.md b/content/guides/games/hello-world.md index 23e4b4bab..cff9a9c9f 100644 --- a/content/guides/games/hello-world.md +++ b/content/guides/games/hello-world.md @@ -25,9 +25,9 @@ keywords: In this development guide, we will walkthrough a simple on-chain game using the Solana blockchain. This game, lovingly called _Tiny Adventure_, is a beginner-friendly Solana program created using the -[Anchor framework](/content/guides/getstarted/intro-to-anchor.md). 
The goal of -this program is to show you how to create a simple game that allows players to -track their position and move left or right. +[Anchor framework](/docs/programs/anchor). The goal of this program is to show +you how to create a simple game that allows players to track their position and +move left or right. > You can find the complete source code, available to deploy from your browser, > in this diff --git a/content/guides/getstarted/full-stack-solana-development.md b/content/guides/getstarted/full-stack-solana-development.md index d8346a050..01b6659ca 100644 --- a/content/guides/getstarted/full-stack-solana-development.md +++ b/content/guides/getstarted/full-stack-solana-development.md @@ -489,7 +489,7 @@ pub struct Counter { Make sure you go over the comments! -The `initialize` instruction instruction does only one this: it creates a new +The `initialize` instruction does only one thing: it creates a new account of the `Counter` type. To do this, we need to know who's paying, details of the account we're creating like the space and the address, and which program to use to create the account. diff --git a/content/guides/getstarted/hello-world-in-your-browser.md b/content/guides/getstarted/hello-world-in-your-browser.md index 23e78f8fc..1fece1426 100644 --- a/content/guides/getstarted/hello-world-in-your-browser.md +++ b/content/guides/getstarted/hello-world-in-your-browser.md @@ -117,9 +117,8 @@ use solana_program::{ ### Write your program logic Every Solana program must define an `entrypoint` that tells the Solana runtime -where to start executing your onchain code. Your program's -[entrypoint](/docs/programs/lang-rust.md#program-entrypoint) should provide a -public function named `process_instruction`: +where to start executing your onchain code. 
Your program's entrypoint should +provide a public function named `process_instruction`: ```rust // declare and export the program's entrypoint @@ -143,8 +142,7 @@ Every onchain program should return the `Ok` [result enum](https://doc.rust-lang.org/std/result/) with a value of `()`. This tells the Solana runtime that your program executed successfully without errors. -Our program above will simply -[log a message](/docs/programs/debugging.md#logging) of "_Hello, world!_" to the +Our program above will simply log a message of "_Hello, world!_" to the blockchain cluster, then gracefully exit with `Ok(())`. ### Build your program @@ -324,4 +322,3 @@ your local development environment: - [Interacting with Tokens and NFTs](/developers/courses/tokens.md) - [Developer Guides](/developers/guides/) - [Developing Games](/content/guides/games/getting-started-with-game-development.md) -- [Learn more about developing Solana programs with Rust](/docs/programs/lang-rust.md) diff --git a/content/guides/getstarted/how-to-cpi-with-signer.md b/content/guides/getstarted/how-to-cpi-with-signer.md index 063559faa..5908d17f9 100644 --- a/content/guides/getstarted/how-to-cpi-with-signer.md +++ b/content/guides/getstarted/how-to-cpi-with-signer.md @@ -14,9 +14,8 @@ keywords: - tutorial --- -This guide uses the -[Anchor framework](/content/guides/getstarted/intro-to-anchor.md) to demonstrate -how to transfer SOL using a [Cross-Program Invocation (CPI)](/docs/core/cpi.md) +This guide uses the [Anchor framework](/docs/programs/anchor) to demonstrate how +to transfer SOL using a [Cross-Program Invocation (CPI)](/docs/core/cpi.md) where the sender is a PDA that the program must sign for. 
A typical use case for this scenario is a program that manages diff --git a/content/guides/getstarted/how-to-cpi.md b/content/guides/getstarted/how-to-cpi.md index e21105cc1..1a1b49283 100644 --- a/content/guides/getstarted/how-to-cpi.md +++ b/content/guides/getstarted/how-to-cpi.md @@ -13,9 +13,8 @@ keywords: - tutorial --- -This guide uses the -[Anchor framework](/content/guides/getstarted/intro-to-anchor.md) to demonstrate -how to transfer SOL using a [Cross Program Invocation (CPI)](/docs/core/cpi.md). +This guide uses the [Anchor framework](/docs/programs/anchor) to demonstrate how +to transfer SOL using a [Cross Program Invocation (CPI)](/docs/core/cpi.md). Included below are three different, but functionally equivalent implementations that you may come across when reading or writing Solana programs. Here is a final reference program on diff --git a/content/guides/getstarted/intro-to-anchor.md b/content/guides/getstarted/intro-to-anchor.md deleted file mode 100644 index 7b098c196..000000000 --- a/content/guides/getstarted/intro-to-anchor.md +++ /dev/null @@ -1,750 +0,0 @@ ---- -date: 2024-04-24T00:00:00Z -difficulty: beginner -title: "Getting Started with the Anchor Framework" -description: - "This guide provides a basic overview of the Anchor framework. Anchor is a - very popular Rust framework for building Solana programs (known as 'smart - contracts' on other chains) that removes boilerplate, provides secure - defaults, and builds client programs automatically." -tags: - - rust - - anchor -keywords: - - tutorial ---- - -The [Anchor framework](https://www.anchor-lang.com/) uses -[Rust macros](https://doc.rust-lang.org/book/ch19-06-macros.html) to reduce -boilerplate code and simplify the implementation of common security checks -required for writing Solana programs. - -Think of Anchor as a framework for Solana programs much like Next.js is for web -development. 
Just as Next.js allows developers to create websites using React -instead of relying solely on HTML and TypeScript, Anchor provides a set of tools -and abstractions that make building Solana programs more intuitive and secure. - -The main macros found in an Anchor program include: - -- [`declare_id`](#declare_id-macro): Specifies the program's on-chain address -- [`#[program]`](#program-macro): Specifies the module containing the program's - instruction logic -- [`#[derive(Accounts)]`](#derive-accounts-macro): Applied to structs to - indicate a list of accounts required for an instruction -- [`#[account]`](#account-macro): Applied to structs to create custom account - types specific to the program - -## Anchor Program - -Below is a simple Anchor program with a single instruction that creates a new -account. We'll walk through it to explain the basic structure of an Anchor -program. Here is the program on -[Solana Playground](https://beta.solpg.io/660f3a86cffcf4b13384d022). - -```rust filename="lib.rs" -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} -``` - -### declare_id macro - -The -[`declare_id`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/account/src/lib.rs#L430) -macro is used to specify the on-chain address of the program (program ID). 
- -```rust filename="lib.rs" {3} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); -``` - -When you build an Anchor program for the first time, the framework generates a -new keypair used to deploy the program (unless specified otherwise). The public -key from this keypair should be used as the program ID in the `declare_id` -macro. - -- When using [Solana Playground](https://beta.solpg.io/), the program ID is - updated automatically for you and can be exported using the UI. -- When building locally, the program keypair can be found in - `/target/deploy/your_program_name.json` - -### program macro - -The -[`#[program]`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/program/src/lib.rs#L12) -macro specifies the module containing all of your program's instructions. Each -public function in the module represents a separate instruction for the program. - -In every function, the first parameter is always a `Context` type. Subsequent -parameters, which are optional, define any additional `data` required by the -instruction. 
- -```rust filename="lib.rs" {5, 8-12} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} -``` - -The -[`Context`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/src/context.rs#L24) -type provides the instruction with access to the following non-argument inputs: - -```rust -pub struct Context<'a, 'b, 'c, 'info, T> { - /// Currently executing program id. - pub program_id: &'a Pubkey, - /// Deserialized accounts. - pub accounts: &'b mut T, - /// Remaining accounts given but not deserialized or validated. - /// Be very careful when using this directly. - pub remaining_accounts: &'c [AccountInfo<'info>], - /// Bump seeds found during constraint validation. This is provided as a - /// convenience so that handlers don't have to recalculate bump seeds or - /// pass them in as arguments. - pub bumps: BTreeMap, -} -``` - -`Context` is a generic type where `T` represents the set of accounts required by -an instruction. When defining the instruction's `Context`, the `T` type is a -struct that implements the `Accounts` trait (`Context`). 
- -This context parameter allows the instruction to access: - -- `ctx.accounts`: The instruction's accounts -- `ctx.program_id`: The address of the program itself -- `ctx.remaining_accounts`: All remaining accounts provided to the instruction - but not specified in the `Accounts` struct -- `ctx.bumps`: Bump seeds for any - [Program Derived Address (PDA)](/docs/core/pda.md) accounts specified in the - `Accounts` struct - -### derive(Accounts) macro - -The -[`#[derive(Accounts)]`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/derive/accounts/src/lib.rs#L630) -macro is applied to a struct and implements the -[`Accounts`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/src/lib.rs#L105) -trait. This is used to specify and validate a set of accounts required for a -particular instruction. - -```rust /Accounts/ {1} -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} -``` - -Each field in the struct represents an account that is required by an -instruction. The naming of each field is arbitrary, but it is recommended to use -a descriptive name that indicates the purpose of the account. - -```rust /signer/2 /new_account/ /system_program/ -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} -``` - -When building Solana programs, it's essential to validate the accounts provided -by the client. 
This validation is achieved in Anchor through account constraints -and specifying appropriate account types: - -- [Account Constraints](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/syn/src/parser/accounts/constraints.rs): - Constraints define additional conditions that an account must satisfy to be - considered valid for the instruction. Constraints are applied using the - `#[account(..)]` attribute, which is placed above an account field in the - `Accounts` struct. - - ```rust {3, 5} - #[derive(Accounts)] - pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, - } - ``` - -- [Account Types](https://github.com/coral-xyz/anchor/tree/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/src/accounts): - Anchor provides various account types to help ensure that the account provided - by the client matches what the program expects. - - ```rust /Account/2 /Signer/ /Program/ - #[derive(Accounts)] - pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, - } - ``` - -Accounts within the `Accounts` struct are accessible in an instruction through -the `Context`, using the `ctx.accounts` syntax. 
- -```rust filename="lib.rs" /ctx.accounts.new_account/ /new_account/ /Initialize/ {15-22} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} -``` - -When an instruction in an Anchor program is invoked, the program performs the -following checks as specified the in `Accounts` struct: - -- Account Type Verification: It verifies that the accounts passed into the - instruction correspond to the account types defined in the instruction - Context. - -- Constraint Checks: It checks the accounts against any additional constraints - specified. - -This helps ensure that the accounts passed to the instruction from the client -are valid. If any checks fail, then the instruction fails with an error before -reaching the main logic of the instruction handler function. - -For more detailed examples, refer to the -[constraints](https://www.anchor-lang.com/docs/account-constraints) and -[account types](https://www.anchor-lang.com/docs/account-types) sections in the -Anchor documentation. - -### account macro - -The -[`#[account]`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/account/src/lib.rs#L66) -macro is applied to structs to define the format of a custom data account type -for a program. Each field in the struct represents a field that will be stored -in the account data. 
- -```rust {3} -#[account] -pub struct NewAccount { - data: u64, -} -``` - -This macro implements various traits -[detailed here](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html). -The key functionalities of the `#[account]` macro include: - -- [Assign Ownership](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/account/src/lib.rs#L119-L132): - When creating an account, the ownership of the account is automatically - assigned to the program specified in the `declare_id`. -- [Set Discriminator](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/account/src/lib.rs#L101-L117): - A unique 8-byte discriminator, specific to the account type, is added as the - first 8 bytes of account data during its initialization. This helps in - differentiating account types and account validation. -- [Data Serialization and Deserialization](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/lang/attribute/account/src/lib.rs#L202-L246): - The account data corresponding to the account type is automatically serialized - and deserialized. - -```rust filename="lib.rs" /data/2,6 /NewAccount/ {24-27} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} -``` - -In Anchor, an account discriminator is an 8-byte identifier, unique to each -account type. 
This identifier is derived from the first 8 bytes of the SHA256 -hash of the account type's name. The first 8 bytes in an account's data are -specifically reserved for this discriminator. - -```rust /8/1 -#[account(init, payer = signer, space = 8 + 8)] -pub new_account: Account<'info, NewAccount>, -``` - -The discriminator is used during the following two scenarios: - -- Initialization: During the initialization of an account, the discriminator is - set with the account type's discriminator. -- Deserialization: When account data is deserialized, the discriminator within - the data is checked against the expected discriminator of the account type. - -If there's a mismatch, it indicates that the client has provided an unexpected -account. This mechanism serves as an account validation check in Anchor -programs, ensuring the correct and expected accounts are used. - -## IDL File - -When an Anchor program is built, Anchor generates an interface description -language (IDL) file representing the structure of the program. This IDL file -provides a standardized JSON-based format for building program instructions and -fetching program accounts. - -Below are examples of how an IDL file relates to the program code. - -### Instructions - -The `instructions` array in the IDL corresponds with the instructions on the -program and specifies the required accounts and parameters for each instruction. 
- -```json filename="IDL.json" {6,8-10, 12} -{ - "version": "0.1.0", - "name": "hello_anchor", - "instructions": [ - { - "name": "initialize", - "accounts": [ - { "name": "newAccount", "isMut": true, "isSigner": true }, - { "name": "signer", "isMut": true, "isSigner": true }, - { "name": "systemProgram", "isMut": false, "isSigner": false } - ], - "args": [{ "name": "data", "type": "u64" }] - } - ], - "accounts": [ - { - "name": "NewAccount", - "type": { - "kind": "struct", - "fields": [{ "name": "data", "type": "u64" }] - } - } - ] -} -``` - -```rust filename="lib.rs" {8, 18, 20, 21} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} -``` - -### Accounts - -The `accounts` array in the IDL corresponds with structs in the program -annotated with the `#[account]` macro, which specifies the structure of the -program's data accounts. 
- -```json filename="IDL.json" {16-22} -{ - "version": "0.1.0", - "name": "hello_anchor", - "instructions": [ - { - "name": "initialize", - "accounts": [ - { "name": "newAccount", "isMut": true, "isSigner": true }, - { "name": "signer", "isMut": true, "isSigner": true }, - { "name": "systemProgram", "isMut": false, "isSigner": false } - ], - "args": [{ "name": "data", "type": "u64" }] - } - ], - "accounts": [ - { - "name": "NewAccount", - "type": { - "kind": "struct", - "fields": [{ "name": "data", "type": "u64" }] - } - } - ] -} -``` - -```rust filename="lib.rs" {24-27} -use anchor_lang::prelude::*; - -declare_id!("11111111111111111111111111111111"); - -#[program] -mod hello_anchor { - use super::*; - pub fn initialize(ctx: Context, data: u64) -> Result<()> { - ctx.accounts.new_account.data = data; - msg!("Changed data to: {}!", data); - Ok(()) - } -} - -#[derive(Accounts)] -pub struct Initialize<'info> { - #[account(init, payer = signer, space = 8 + 8)] - pub new_account: Account<'info, NewAccount>, - #[account(mut)] - pub signer: Signer<'info>, - pub system_program: Program<'info, System>, -} - -#[account] -pub struct NewAccount { - data: u64, -} - -``` - -## Client - -Anchor provides a Typescript client library -([`@coral-xyz/anchor`](https://github.com/coral-xyz/anchor/tree/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor)) -that simplifies the process of interacting with Solana programs from the client. - -To use the client library, you first need to set up an instance of a -[`Program`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/index.ts#L58) -using the IDL file generated by Anchor. - -### Client Program - -Creating an instance of the `Program` requires the program's IDL, its on-chain -address (`programId`), and an -[`AnchorProvider`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/provider.ts#L55). 
-An `AnchorProvider` combines two things: - -- `Connection` - the connection to a [Solana cluster](/docs/core/clusters.md) - (i.e. localhost, devnet, mainnet) -- `Wallet` - (optional) a default wallet used to pay and sign transactions - -When building an Anchor program locally, the setup for creating an instance of -the `Program` is done automatically in the test file. The IDL file can be found -in the `/target` folder. - -```typescript showLineNumbers -import * as anchor from "@coral-xyz/anchor"; -import { Program, BN } from "@coral-xyz/anchor"; -import { HelloAnchor } from "../target/types/hello_anchor"; - -const provider = anchor.AnchorProvider.env(); -anchor.setProvider(provider); -const program = anchor.workspace.HelloAnchor as Program; -``` - -When integrating with a frontend using the -[wallet adapter](https://solana.com/developers/guides/wallets/add-solana-wallet-adapter-to-nextjs), -you'll need to manually set up the `AnchorProvider` and `Program`. - -```ts {8-9, 12} -import { Program, Idl, AnchorProvider, setProvider } from "@coral-xyz/anchor"; -import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; -import { IDL, HelloAnchor } from "./idl"; - -const { connection } = useConnection(); -const wallet = useAnchorWallet(); - -const provider = new AnchorProvider(connection, wallet, {}); -setProvider(provider); - -const programId = new PublicKey("..."); -const program = new Program(IDL, programId); -``` - -Alternatively, you can create an instance of the `Program` using only the IDL -and the `Connection` to a Solana cluster. This means if there is no default -`Wallet`, but allows you to use the `Program` to fetch accounts before a wallet -is connected. 
- -```ts {8-10} -import { Program } from "@coral-xyz/anchor"; -import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; -import { IDL, HelloAnchor } from "./idl"; - -const programId = new PublicKey("..."); -const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); - -const program = new Program(IDL, programId, { - connection, -}); -``` - -### Invoke Instructions - -Once the `Program` is set up, you can use the Anchor -[`MethodsBuilder`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/methods.ts#L155) -to build an instruction, a transaction, or build and send a transaction. The -basic format looks like this: - -- `program.methods` - This is the builder API for creating instruction calls - related to the program's IDL -- `.instructionName` - Specific instruction from the program IDL, passing in any - instruction data as comma-separated values -- `.accounts` - Pass in the address of each account required by the instruction - as specified in the IDL -- `.signers` - Optionally pass in an array of keypairs required as additional - signers by the instruction - -```ts -await program.methods - .instructionName(instructionData1, instructionData2) - .accounts({}) - .signers([]) - .rpc(); -``` - -Below are examples of how to invoke an instruction using the methods builder. - -#### rpc() - -The -[`rpc()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/methods.ts#L283) -method -[sends a signed transaction](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/rpc.ts#L29) -with the specified instruction and returns a `TransactionSignature`. When using -`.rpc`, the `Wallet` from the `Provider` is automatically included as a signer. 
- -```ts {13} -// Generate keypair for the new account -const newAccountKp = new Keypair(); - -const data = new BN(42); -const transactionSignature = await program.methods - .initialize(data) - .accounts({ - newAccount: newAccountKp.publicKey, - signer: wallet.publicKey, - systemProgram: SystemProgram.programId, - }) - .signers([newAccountKp]) - .rpc(); -``` - -#### transaction() - -The -[`transaction()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/methods.ts#L382) -method -[builds a `Transaction`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/transaction.ts#L18-L26) -and adds the specified instruction to the transaction (without automatically -sending). - -```ts {12} /transaction/1,2,4 -// Generate keypair for the new account -const newAccountKp = new Keypair(); - -const data = new BN(42); -const transaction = await program.methods - .initialize(data) - .accounts({ - newAccount: newAccountKp.publicKey, - signer: wallet.publicKey, - systemProgram: SystemProgram.programId, - }) - .transaction(); - -const transactionSignature = await connection.sendTransaction(transaction, [ - wallet.payer, - newAccountKp, -]); -``` - -#### instruction() - -The -[`instruction()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/methods.ts#L348) -method -[builds a `TransactionInstruction`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/instruction.ts#L57-L61) -using the specified instruction. This is useful if you want to manually add the -instruction to a transaction and combine it with other instructions. 
- -```ts {12} /instruction/ -// Generate keypair for the new account -const newAccountKp = new Keypair(); - -const data = new BN(42); -const instruction = await program.methods - .initialize(data) - .accounts({ - newAccount: newAccountKp.publicKey, - signer: wallet.publicKey, - systemProgram: SystemProgram.programId, - }) - .instruction(); - -const transaction = new Transaction().add(instruction); - -const transactionSignature = await connection.sendTransaction(transaction, [ - wallet.payer, - newAccountKp, -]); -``` - -### Fetch Accounts - -The client `Program` also allows you to easily fetch and filter program -accounts. Simply use `program.account` and then specify the name of the account -type on the IDL. Anchor then deserializes and returns all accounts as specified. - -#### all() - -Use -[`all()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/account.ts#L251) -to fetch all existing accounts for a specific account type. - -```ts /all/ -const accounts = await program.account.newAccount.all(); -``` - -#### memcmp - -Use `memcmp` to filter for accounts storing data that matches a specific value -at a specific offset. When calculating the offset, remember that the first 8 -bytes are reserved for the account discriminator in accounts created through an -Anchor program. Using `memcmp` requires you to understand the byte layout of the -data field for the account type you are fetching. 
- -```ts /memcmp/ -const accounts = await program.account.newAccount.all([ - { - memcmp: { - offset: 8, - bytes: "", - }, - }, -]); -``` - -#### fetch() - -Use -[`fetch()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/account.ts#L165) -to get the account data for a specific account by passing in the account address - -```ts /fetch/ -const account = await program.account.newAccount.fetch(ACCOUNT_ADDRESS); -``` - -#### fetchMultiple() - -Use -[`fetchMultiple()`](https://github.com/coral-xyz/anchor/blob/852fcc77beb6302474a11e0f8e6f1e688021be36/ts/packages/anchor/src/program/namespace/account.ts#L200) -to get the account data for multiple accounts by passing in an array of account -addresses - -```ts /fetchMultiple/ -const accounts = await program.account.newAccount.fetchMultiple([ - ACCOUNT_ADDRESS_ONE, - ACCOUNT_ADDRESS_TWO, -]); -``` diff --git a/content/guides/getstarted/intro-to-native-rust.md b/content/guides/getstarted/intro-to-native-rust.md index 27e508e67..6f5125e6b 100644 --- a/content/guides/getstarted/intro-to-native-rust.md +++ b/content/guides/getstarted/intro-to-native-rust.md @@ -16,7 +16,7 @@ To write Solana programs without leveraging the Anchor framework, we use the This is the base library for writing onchain programs in Rust. For beginners, it is recommended to start with the -[Anchor framework](/content/guides/getstarted/intro-to-anchor.md). +[Anchor framework](/docs/programs/anchor). ## Program diff --git a/content/guides/getstarted/rust-to-solana.md b/content/guides/getstarted/rust-to-solana.md index 3c001d31b..c1d9d75a1 100644 --- a/content/guides/getstarted/rust-to-solana.md +++ b/content/guides/getstarted/rust-to-solana.md @@ -237,8 +237,7 @@ boilerplate code, speeding up the development cycle. Additionally, it provides some security checks by default, making Solana programs more secure. 
To create a new program, simply -[create a new Anchor project](https://solana.com/developers/guides/getstarted/intro-to-anchor) -in the Solana playground. +[create a new Anchor project](/docs/programs/anchor) in the Solana playground. Alternatively, [install the Anchor CLI](https://www.anchor-lang.com/docs/installation) locally, @@ -265,9 +264,6 @@ consider writing onchain programs in Rust, and offchain This guide has covered the basics of developing for Solana with Rust, from setup details and restrictions to development environments and frameworks. -For more Rust-related Solana resources, check out the -[Developing with Rust page](https://solana.com/docs/programs/lang-rust). - For other Solana program examples written with Rust, check out these [examples on GitHub](https://github.com/solana-labs/solana-program-library/tree/master/examples/rust). diff --git a/content/guides/token-extensions/dynamic-meta-data-nft.md b/content/guides/token-extensions/dynamic-meta-data-nft.md index 279400ac2..b52b6d83a 100644 --- a/content/guides/token-extensions/dynamic-meta-data-nft.md +++ b/content/guides/token-extensions/dynamic-meta-data-nft.md @@ -46,9 +46,9 @@ character's stats or inventory). ## Building the on-chain program In this developer guide, we will demonstrate how to build these Token Extension -based NFTs and custom metadata using an -[Anchor program](/content/guides/getstarted/intro-to-anchor.md). This program -will save the level and the collected resources of a game player within an NFT. +based NFTs and custom metadata using an [Anchor program](/docs/programs/anchor). +This program will save the level and the collected resources of a game player +within an NFT. This NFT will be created by the Anchor program so it is very easy to mint from the JavaScript client. 
Each NFT will have some basic structure provided via the diff --git a/docs/advanced/confirmation.md b/docs/advanced/confirmation.md index 02cc91ad5..b26144204 100644 --- a/docs/advanced/confirmation.md +++ b/docs/advanced/confirmation.md @@ -341,8 +341,8 @@ of the cluster state with one of the following methods: 1. Fetch your RPC node's highest processed slot by using the [`getSlot`](/docs/rpc/http/getSlot.mdx) RPC API with the `processed` commitment level and then call the - [`getMaxShredInsertSlot](/docs/rpc/http/getMaxShredInsertSlot.mdx) RPC API to - get the highest slot that your RPC node has received a “shred” of a block + [`getMaxShredInsertSlot`](/docs/rpc/http/getMaxShredInsertSlot.mdx) RPC API + to get the highest slot that your RPC node has received a “shred” of a block for. If the difference between these responses is very large, the cluster is producing blocks far ahead of what the RPC node has processed. 2. Call the `getLatestBlockhash` RPC API with the `confirmed` commitment level diff --git a/docs/advanced/index.md b/docs/advanced/index.md index 933c4c67f..11ae9d7f4 100644 --- a/docs/advanced/index.md +++ b/docs/advanced/index.md @@ -1,5 +1,5 @@ --- metaOnly: true -title: Advanced Concepts +title: Advanced Topics sidebarSortOrder: 3 --- diff --git a/docs/advanced/versions.md b/docs/advanced/versions.md index c86528558..fea794409 100644 --- a/docs/advanced/versions.md +++ b/docs/advanced/versions.md @@ -13,10 +13,9 @@ Versioned Transactions are the new transaction format that allow for additional functionality in the Solana runtime, including [Address Lookup Tables](/docs/advanced/lookup-tables.md). 
-While changes to [onchain](/docs/programs/index.md) programs are **NOT** -required to support the new functionality of versioned transactions (or for -backwards compatibility), developers **WILL** need update their client side code -to prevent +While changes to onchain programs are **NOT** required to support the new +functionality of versioned transactions (or for backwards compatibility), +developers **WILL** need update their client side code to prevent [errors due to different transaction versions](#max-supported-transaction-version). ## Current Transaction Versions diff --git a/docs/core/fees.md b/docs/core/fees.md index 8467f23d1..61a01b07c 100644 --- a/docs/core/fees.md +++ b/docs/core/fees.md @@ -197,9 +197,8 @@ syscalls, etc), each may consume a of compute units. > A program can log details about its compute usage, including how much remains -> in its alloted compute budget. See -> [program debugging](/docs/programs/debugging.md#monitoring-compute-budget-consumption) -> for more information. You can also find more information in this guide for +> in its alloted compute budget. You can also find more information in this +> guide for > [optimizing your compute usage](/content/guides/advanced/how-to-optimize-compute.md). Each transaction is alloted a [compute unit limit](#compute-unit-limit), either @@ -423,7 +422,7 @@ allocates, the higher the withheld rent deposit will be. ### Rent exempt -Accounts must maintain a lamport balance greater the minimum required to store +Accounts must maintain a lamport balance greater than the minimum required to store its respective data on-chain. This is called "_rent exempt_" and that balance is called the "_minimum balance for rent exemption_". diff --git a/docs/core/programs.md b/docs/core/programs.md index 330fb3d0c..5622a4c0d 100644 --- a/docs/core/programs.md +++ b/docs/core/programs.md @@ -9,10 +9,6 @@ In the Solana ecosystem, "smart contracts" are called programs. 
Each stores executable logic, organized into specific functions referred to as [instructions](/docs/core/transactions.md#instruction). -For additional topics related to Solana programs, refer to the pages included -under the [Deploying Programs](/docs/programs/index.md) section of this -documentation. - ## Key Points - Programs are on-chain accounts that contain executable code. This code is @@ -33,10 +29,10 @@ Solana programs are predominantly written in the [Rust](https://doc.rust-lang.org/book/) programming language, with two common approaches for development: -- [Anchor](/content/guides/getstarted/intro-to-anchor.md): A framework designed - for Solana program development. It provides a faster and simpler way to write - programs, using Rust macros to significantly reduce boilerplate code. For - beginners, it is recommended to start with the Anchor framework. +- [Anchor](/docs/programs/anchor): A framework designed for Solana program + development. It provides a faster and simpler way to write programs, using + Rust macros to significantly reduce boilerplate code. For beginners, it is + recommended to start with the Anchor framework. - [Native Rust](/content/guides/getstarted/intro-to-native-rust.md): This approach involves writing Solana programs in Rust without leveraging any diff --git a/docs/intro/dev.md b/docs/intro/dev.md index 1d9d5caab..42426b5db 100644 --- a/docs/intro/dev.md +++ b/docs/intro/dev.md @@ -68,16 +68,16 @@ If you're developing on the client-side, you can work with any programming language you're comfortable with. 
Solana has community-contributed SDKs to help developers interact with the Solana network in most popular languages : -| Language | SDK | -| ---------- | ------------------------------------------------------------------------------------------- | -| RUST | [solana_sdk](https://docs.rs/solana-sdk/latest/solana_sdk/) | -| Typescript | [@solana/web3.js](https://github.com/solana-labs/solana-web3.js) | -| Python | [solders](https://github.com/kevinheavey/solders) | -| Java | [solanaj](https://github.com/skynetcap/solanaj) | -| C++ | [solcpp](https://github.com/mschneider/solcpp) | -| Go | [solana-go](https://github.com/gagliardetto/solana-go) | -| Kotlin | [solanaKT](https://github.com/metaplex-foundation/SolanaKT) | -| Dart | [solana](https://github.com/espresso-cash/espresso-cash-public/tree/master/packages/solana) | +| Language | SDK | +| ---------- | -------------------------------------------------------------------------------------------------------- | +| RUST | [solana_sdk](https://docs.rs/solana-sdk/latest/solana_sdk/) | +| Typescript | [@solana/web3.js](https://github.com/solana-labs/solana-web3.js) | +| Python | [solders](https://github.com/kevinheavey/solders) | +| Java | [solanaj](https://github.com/skynetcap/solanaj) or [solana4j](https://github.com/LMAX-Exchange/solana4j) | +| C++ | [solcpp](https://github.com/mschneider/solcpp) | +| Go | [solana-go](https://github.com/gagliardetto/solana-go) | +| Kotlin | [solanaKT](https://github.com/metaplex-foundation/SolanaKT) or [sol4k](https://github.com/sol4k/sol4k) | +| Dart | [solana](https://github.com/espresso-cash/espresso-cash-public/tree/master/packages/solana) | You'll also need a connection with an RPC to interact with the network. You can either work with a [RPC infrastructure provider](https://solana.com/rpc) or @@ -149,8 +149,7 @@ your program based on your language preference: If you do not want to develop your programs locally, there's also the [online IDE Solana Playground](https://beta.solpg.io). 
Solana Playground allows you to write, test, and deploy programs on Solana. You can get started with -Solana Playground by -[following our guide](https://solana.com/developers/guides/getstarted/hello-world-in-your-browser). +Solana Playground by [following our quick start guide](/docs/intro/quick-start). ### Developer Environments @@ -191,8 +190,4 @@ problem can find your question! ## Next steps -You're now ready to get started building on Solana! - -- [Deploy your first Solana program in the browser](/content/guides/getstarted/hello-world-in-your-browser.md) -- [Get started building programs locally with Rust](/content/guides/getstarted/local-rust-hello-world.md) -- [Overview of writing Solana programs](/docs/programs/index.md) +[You're now ready to get started building on Solana!](/docs/intro/quick-start) diff --git a/docs/intro/installation.md b/docs/intro/installation.md index 26a4653b2..67bc80b21 100644 --- a/docs/intro/installation.md +++ b/docs/intro/installation.md @@ -265,7 +265,7 @@ writing Solana programs. There are two ways to install the Anchor CLI and tooling: -1. Using Anchor Version Manager (AVM) - the is the **recommended installation** +1. Using Anchor Version Manager (AVM) - is the **recommended installation** method since it simplifies updating Anchor versions in the future 2. Without AVM - this requires more a manual process to update Anchor versions later @@ -363,42 +363,6 @@ You should see output similar to the following: anchor-cli 0.30.1 ``` -If you encounter the error `type annotations needed for Box<_>` when installing -the Anchor CLI, try changing your Rust version to 1.79.0 and attempt the -installation again. - - -`"}> - -``` - Compiling time v0.3.29 -error[E0282]: type annotations needed for `Box<_>` - --> /home/x/.cargo/registry/src/index.crates.io-6f17d22bba15001f/time-0.3.29/src/format_description/parse/mod.rs:83:9 - | -83 | let items = format_items - | ^^^^^ -... 
-86 | Ok(items.into()) - | ---- type must be known at this point - | -help: consider giving `items` an explicit type, where the placeholders `_` are specified - | -83 | let items: Box<_> = format_items - | ++++++++ -``` - -You can find more context regarding this error -[here](https://github.com/coral-xyz/anchor/pull/3143) - - - - -Run the following command to install Rust 1.79.0: - -```shell -rustup default 1.79.0 -``` - When installing the Anchor CLI on Linux or WSL, you may encounter this error: ``` diff --git a/docs/intro/quick-start/program-derived-address.md b/docs/intro/quick-start/program-derived-address.md index 49ff4ac33..eba31e76e 100644 --- a/docs/intro/quick-start/program-derived-address.md +++ b/docs/intro/quick-start/program-derived-address.md @@ -525,7 +525,7 @@ The body of the function then: -Rebuld the program +Rebuild the program ```shell filename="Terminal" build @@ -599,7 +599,7 @@ The `Delete` struct defines the accounts required for the `delete` instruction: -Next, implement the logic for the `update` instruction. +Next, implement the logic for the `delete` instruction. ```rs filename="lib.rs" pub fn delete(_ctx: Context) -> Result<()> { diff --git a/docs/intro/quick-start/reading-from-network.md b/docs/intro/quick-start/reading-from-network.md index 6111d89c2..669c5bdea 100644 --- a/docs/intro/quick-start/reading-from-network.md +++ b/docs/intro/quick-start/reading-from-network.md @@ -30,12 +30,12 @@ Account Model. For more details, refer to the ## Fetch Playground Wallet - - Let's start by looking at a familiar account - your own Playground Wallet! We'll fetch this account and examine its structure to understand what a basic Solana account looks like. 
+ + ### Open Example 1 Click this [link](https://beta.solpg.io/6671c5e5cffcf4b13384d198) to open the diff --git a/docs/intro/wallets.md b/docs/intro/wallets.md index b2f29a098..0973a6de5 100644 --- a/docs/intro/wallets.md +++ b/docs/intro/wallets.md @@ -60,8 +60,8 @@ first will need to create a wallet.** ## Supported Wallets Several browser and mobile app based wallets support Solana. Find some options -that might be right for you on the -[Solana Ecosystem](https://solana.com/ecosystem/explore?categories=wallet) page. +that might be right for you on the [Solana Wallets](https://solana.com/wallets) +page. For advanced users or developers, the [command-line wallets](https://docs.solanalabs.com/cli/wallets) may be more diff --git a/docs/programs/anchor/client-typescript.md b/docs/programs/anchor/client-typescript.md new file mode 100644 index 000000000..ba5c096ec --- /dev/null +++ b/docs/programs/anchor/client-typescript.md @@ -0,0 +1,354 @@ +--- +title: JS/TS Client +description: + Learn how to use Anchor's TypeScript client library to interact with Solana + progra +sidebarLabel: JS/TS Client +sidebarSortOrder: 3 +--- + +Anchor provides a Typescript client library +([@coral-xyz/anchor](https://github.com/coral-xyz/anchor/tree/v0.30.1/ts/packages/anchor)) +that simplifies the process of interacting with Solana programs from the client +in JavaScript or TypeScript. + +## Client Program + +To use the client library, first create an instance of a +[`Program`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/index.ts#L58) +using the [IDL file](/docs/programs/anchor/idl) generated by Anchor. + +Creating an instance of the `Program` requires the program's IDL and an +[`AnchorProvider`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/provider.ts#L55). +An `AnchorProvider` is an abstraction that combines two things: + +- `Connection` - the connection to a [Solana cluster](/docs/core/clusters.md) + (i.e. 
localhost, devnet, mainnet) +- `Wallet` - (optional) a default wallet used to pay and sign transactions + + + + + +When integrating with a frontend using the +[wallet adapter](https://solana.com/developers/guides/wallets/add-solana-wallet-adapter-to-nextjs), +you'll need to set up the `AnchorProvider` and `Program`. + +```ts {9-10, 12-14} +import { Program, AnchorProvider, setProvider } from "@coral-xyz/anchor"; +import { useAnchorWallet, useConnection } from "@solana/wallet-adapter-react"; +import type { HelloAnchor } from "./idlType"; +import idl from "./idl.json"; + +const { connection } = useConnection(); +const wallet = useAnchorWallet(); + +const provider = new AnchorProvider(connection, wallet, {}); +setProvider(provider); + +export const program = new Program(idl as HelloAnchor, { + connection, +}); +``` + +In the code snippet above: + +- `idl.json` is the IDL file generated by Anchor, found at + `/target/idl/.json` in an Anchor project. +- `idlType.ts` is the IDL type (for use with TS), found at + `/target/types/.ts` in an Anchor project. + +Alternatively, you can create an instance of the `Program` using only the IDL +and the `Connection` to a Solana cluster. This means there is no default +`Wallet`, but allows you to use the `Program` to fetch accounts or build +instructions without a connected wallet. + +```ts {8-10} +import { clusterApiUrl, Connection, PublicKey } from "@solana/web3.js"; +import { Program } from "@coral-xyz/anchor"; +import type { HelloAnchor } from "./idlType"; +import idl from "./idl.json"; + +const connection = new Connection(clusterApiUrl("devnet"), "confirmed"); + +export const program = new Program(idl as HelloAnchor, { + connection, +}); +``` + + + + +Anchor automatically sets up a `Program` instance in the default test file of +new projects. However, this setup differs from how you'd initialize a `Program` +outside the Anchor workspace, such as in React or Node.js applications. 
+ +```typescript +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; + +describe("hello_anchor", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.HelloAnchor as Program; + + it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods.initialize().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + + + + +## Invoke Instructions + +Once the `Program` is set up using a program IDL, you can use the Anchor +[`MethodsBuilder`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L155) +to: + +- Build individual instructions +- Build transactions +- Build and send transactions + +The basic format looks like the following: + + + + + +`program.methods` - This is the builder API for creating instruction calls from +the program's IDL + +```ts /methods/ {1} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + + + + +Following `.methods`, specify the name of an instruction from the program IDL, +passing in any required arguments as comma-separated values. + +```ts /instructionName/ /instructionData1/ /instructionData2/ {2} +await program.methods + .instructionName(instructionData1, instructionData2) + .accounts({}) + .signers([]) + .rpc(); +``` + + + + +`.accounts` - Pass in the address of the accounts required by the instruction as +specified in the IDL + +```ts /accounts/ {3} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + +Note that certain account addresses don't need to be explicitly provided, as the +Anchor client can automatically resolve them. These typically include: + +- Common accounts (ex. 
the System Program) +- Accounts where the address is a PDA (Program Derived Address) + + + + +`.signers` - Optionally pass in an array of keypairs required as additional +signers by the instruction. This is commonly used when creating new accounts +where the account address is the public key of a newly generated keypair. + +```ts /signers/ {4} +await program.methods + .instructionName(instructionData) + .accounts({}) + .signers([]) + .rpc(); +``` + +Note that `.signers` should only be used when also using `.rpc()`. When using +`.transaction()` or `.instruction()`, signers should be added to the transaction +before sending. + + + + +Anchor provides multiple methods for building program instructions: + + + + + +The +[`rpc()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L283) +method +[sends a signed transaction](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/rpc.ts#L29) +with the specified instruction and returns a `TransactionSignature`. + +When using `.rpc`, the `Wallet` from the `Provider` is automatically included as +a signer. + +```ts {13} +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .signers([newAccountKp]) + .rpc(); +``` + + + + +The +[`transaction()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L382) +method +[builds a `Transaction`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/transaction.ts#L18-L26) +with the specified instruction without sending the transaction. 
+ +```ts {12} /transaction()/1,2,4 +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const transaction = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .transaction(); + +const transactionSignature = await connection.sendTransaction(transaction, [ + wallet.payer, + newAccountKp, +]); +``` + + + + +The +[`instruction()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/methods.ts#L348) +method +[builds a `TransactionInstruction`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/instruction.ts#L57-L61) +using the specified instruction. This is useful if you want to manually add the +instruction to a transaction and combine it with other instructions. + +```ts {12} /instruction()/ +// Generate keypair for the new account +const newAccountKp = new Keypair(); + +const data = new BN(42); +const instruction = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + systemProgram: SystemProgram.programId, + }) + .instruction(); + +const transaction = new Transaction().add(instruction); + +const transactionSignature = await connection.sendTransaction(transaction, [ + wallet.payer, + newAccountKp, +]); +``` + + + + +## Fetch Accounts + +The `Program` client simplifies the process of fetching and deserializing +accounts created by your Anchor program. + +Use `program.account` followed by the name of the account type defined in the +IDL. Anchor provides multiple methods for fetching accounts. + + + + + +Use +[`all()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L251) +to fetch all existing accounts for a specific account type. 
+
+```ts /all/
+const accounts = await program.account.newAccount.all();
+```
+
+
+
+Use `memcmp` (memory compare) to filter for account data that matches a specific
+value at a specific offset. Using `memcmp` requires you to understand the byte
+layout of the data field for the account type you are fetching.
+
+When calculating the offset, remember that the first 8 bytes in accounts created
+by an Anchor program are reserved for the account discriminator.
+
+```ts /memcmp/
+const accounts = await program.account.newAccount.all([
+  {
+    memcmp: {
+      offset: 8,
+      bytes: "",
+    },
+  },
+]);
+```
+
+
+
+Use
+[`fetch()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L165)
+to fetch the account data for a single account
+
+```ts /fetch/
+const account = await program.account.newAccount.fetch(ACCOUNT_ADDRESS);
+```
+
+
+
+Use
+[`fetchMultiple()`](https://github.com/coral-xyz/anchor/blob/v0.30.1/ts/packages/anchor/src/program/namespace/account.ts#L200)
+to fetch the account data for multiple accounts by passing in an array of
+account addresses
+
+```ts /fetchMultiple/
+const accounts = await program.account.newAccount.fetchMultiple([
+  ACCOUNT_ADDRESS_ONE,
+  ACCOUNT_ADDRESS_TWO,
+]);
+```
+
+
diff --git a/docs/programs/anchor/cpi.md b/docs/programs/anchor/cpi.md
new file mode 100644
index 000000000..092b9481f
--- /dev/null
+++ b/docs/programs/anchor/cpi.md
@@ -0,0 +1,551 @@
+---
+title: CPIs with Anchor
+description:
+  Learn how to implement Cross Program Invocations (CPIs) in Anchor programs,
+  enabling interaction between different programs on Solana
+sidebarLabel: CPIs with Anchor
+sidebarSortOrder: 5
+---
+
+[Cross Program Invocations (CPI)](/docs/core/cpi.md) refer to the process of one
+program invoking instructions of another program, which enables the
+composability of programs on Solana.
+ +This section will cover the basics of implementing CPIs in an Anchor program, +using a simple SOL transfer instruction as a practical example. Once you +understand the basics of how to implement a CPI, you can apply the same concepts +for any instruction. + +## Cross Program Invocations + +Let's examine a program that implements a CPI to the System Program's transfer +instruction. Here is the example program on +[Solana Playground](https://beta.solpg.io/66df2751cffcf4b13384d35a). + +The `lib.rs` file includes a single `sol_transfer` instruction. When the +`sol_transfer` instruction on the Anchor program is invoked, the program +internally invokes the transfer instruction of the System Program. + +```rs filename="lib.rs" /sol_transfer/ /transfer/ {23} +use anchor_lang::prelude::*; +use anchor_lang::system_program::{transfer, Transfer}; + +declare_id!("9AvUNHjxscdkiKQ8tUn12QCMXtcnbR9BVGq3ULNzFMRi"); + +#[program] +pub mod cpi { + use super::*; + + pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ); + + transfer(cpi_context, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account(mut)] + sender: Signer<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The `cpi.test.ts` file shows how to invoke the Anchor program's `sol_transfer` +instruction and logs a link to the transaction details on SolanaFM. 
+ +```ts filename="cpi.test.ts" +it("SOL Transfer Anchor", async () => { + const transactionSignature = await program.methods + .solTransfer(new BN(transferAmount)) + .accounts({ + sender: sender.publicKey, + recipient: recipient.publicKey, + }) + .rpc(); + + console.log( + `\nTransaction Signature:` + + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +You can build, deploy, and run the test for this example on Playground to view +the transaction details on the [SolanaFM explorer](https://solana.fm/). + +The transaction details will show that the Anchor program was first invoked +(instruction 1), which then invokes the System Program (instruction 1.1), +resulting in a successful SOL transfer. + +![Transaction Details](/assets/docs/core/cpi/transaction-details.png) + +### Example 1 Explanation + +Implementing a CPI follows the same pattern as building an instruction to add to +a transaction. When implementing a CPI, we must specify the program ID, +accounts, and instruction data for the instruction being called. + +The System Program's transfer instruction requires two accounts: + +- `from`: The account sending SOL. +- `to`: The account receiving SOL. + +In the example program, the `SolTransfer` struct specifies the accounts required +by the transfer instruction. The System Program is also included because the CPI +invokes the System Program. + +```rust /sender/ /recipient/ /system_program/ +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account(mut)] + sender: Signer<'info>, // from account + #[account(mut)] + recipient: SystemAccount<'info>, // to account + system_program: Program<'info, System>, // program ID +} +``` + +The following tabs present three approaches to implementing Cross Program +Invocations (CPIs), each at a different level of abstraction. All examples are +functionally equivalent. The main purpose is to illustrate the implementation +details of the CPI. 
+ + + + + +The `sol_transfer` instruction included in the example code shows a typical +approach for constructing CPIs using the Anchor framework. + +This approach involves creating a +[`CpiContext`](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html), +which includes the `program_id` and accounts required for the instruction being +called, followed by a helper function (`transfer`) to invoke a specific +instruction. + +```rust +use anchor_lang::system_program::{transfer, Transfer}; +``` + +```rust /cpi_context/ {14} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ); + + transfer(cpi_context, amount)?; + Ok(()) +} +``` + +The `cpi_context` variable specifies the program ID (System Program) and +accounts (sender and recipient) required by the transfer instruction. + +```rust /program_id/ /from_pubkey/ /to_pubkey/ +let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, +); +``` + +The `cpi_context` and `amount` are then passed into the `transfer` function to +execute the CPI invoking the transfer instruction of the System Program. + +```rust +transfer(cpi_context, amount)?; +``` + + + + +This example shows a different approach to implementing a CPI using the `invoke` +function and +[`system_instruction::transfer`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/system_instruction.rs#L881), +which is generally seen in native Rust programs. + +Under the hood, the previous example is an abstraction of this implementation. +The example below is functionally equivalent to the previous example. 
+ +```rust +use anchor_lang::solana_program::{program::invoke, system_instruction}; +``` + +```rust /instruction/1,3 {9} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let instruction = + &system_instruction::transfer(&from_pubkey.key(), &to_pubkey.key(), amount); + + invoke(instruction, &[from_pubkey, to_pubkey, program_id])?; + Ok(()) +} +``` + + + + +You can also manually build the instruction to pass into the `invoke()` +function. This is useful when there is no crate available to help build the +instruction you want to invoke. This approach requires you to specify the +`AccountMeta`s for the instruction and correctly create the instruction data +buffer. + +The `sol_transfer` instruction below is a manual implementation of a CPI to the +System Program's transfer instruction. + +```rust /instruction/10,13 {28} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.sender.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + // Prepare instruction AccountMetas + let account_metas = vec![ + AccountMeta::new(from_pubkey.key(), true), + AccountMeta::new(to_pubkey.key(), false), + ]; + + // SOL transfer instruction discriminator + let instruction_discriminator: u32 = 2; + + // Prepare instruction data + let mut instruction_data = Vec::with_capacity(4 + 8); + instruction_data.extend_from_slice(&instruction_discriminator.to_le_bytes()); + instruction_data.extend_from_slice(&amount.to_le_bytes()); + + // Create instruction + let instruction = Instruction { + program_id: program_id.key(), + accounts: account_metas, + data: instruction_data, + }; + + // Invoke instruction + invoke(&instruction, &[from_pubkey, to_pubkey, program_id])?; 
+    Ok(())
+}
+```
+
+The `sol_transfer` instruction above replicates this
+[example](/docs/core/transactions.md#manual-sol-transfer) of manually building a
+SOL transfer instruction. It follows the same pattern as building an
+[instruction](/docs/core/transactions.md#instruction) to add to a transaction.
+
+When building an instruction in Rust, use the following syntax to specify the
+`AccountMeta` for each account:
+
+```rust
+AccountMeta::new(account1_pubkey, true), // writable, signer
+AccountMeta::new(account2_pubkey, false), // writable, not signer
+AccountMeta::new_readonly(account3_pubkey, false), // not writable, not signer
+AccountMeta::new_readonly(account4_pubkey, true), // not writable, signer
+```
+
+
+
+Here is a reference program on
+[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi)
+which includes all 3 examples.
+
+## Cross Program Invocations with PDA Signers
+
+Next, let's examine a program that implements a CPI to the System Program's
+transfer instruction where the sender is a Program Derived Address (PDA) that
+must be "signed" for by the program. Here is the example program on
+[Solana Playground](https://beta.solpg.io/66df2bd2cffcf4b13384d35b).
+
+The `lib.rs` file includes the following program with a single `sol_transfer`
+instruction.
+ +```rust filename="lib.rs" +use anchor_lang::prelude::*; +use anchor_lang::system_program::{transfer, Transfer}; + +declare_id!("3455LkCS85a4aYmSeNbRrJsduNQfYRY82A7eCD3yQfyR"); + +#[program] +pub mod cpi { + use super::*; + + pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ) + .with_signer(signer_seeds); + + transfer(cpi_context, amount)?; + Ok(()) + } +} + +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account( + mut, + seeds = [b"pda", recipient.key().as_ref()], + bump, + )] + pda_account: SystemAccount<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The `cpi.test.ts` file shows how to invoke the Anchor program's `sol_transfer` +instruction and logs a link to the transaction details on SolanaFM. + +It shows how to derive the PDA using the seeds specified in the program: + +```ts /pda/ /wallet.publicKey/ +const [PDA] = PublicKey.findProgramAddressSync( + [Buffer.from("pda"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + +The first step in this example is to fund the PDA account with a basic SOL +transfer from the Playground wallet. 
+ +```ts filename="cpi.test.ts" +it("Fund PDA with SOL", async () => { + const transferInstruction = SystemProgram.transfer({ + fromPubkey: wallet.publicKey, + toPubkey: PDA, + lamports: transferAmount, + }); + + const transaction = new Transaction().add(transferInstruction); + + const transactionSignature = await sendAndConfirmTransaction( + connection, + transaction, + [wallet.payer], // signer + ); + + console.log( + `\nTransaction Signature:` + + `https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +Once the PDA is funded with SOL, invoke the `sol_transfer` instruction. This +instruction transfers SOL from the PDA account back to the `wallet` account via +a CPI to the System Program, which is "signed" for by the program. + +```ts +it("SOL Transfer with PDA signer", async () => { + const transactionSignature = await program.methods + .solTransfer(new BN(transferAmount)) + .accounts({ + pdaAccount: PDA, + recipient: wallet.publicKey, + }) + .rpc(); + + console.log( + `\nTransaction Signature: https://solana.fm/tx/${transactionSignature}?cluster=devnet-solana`, + ); +}); +``` + +You can build, deploy, and run the test to view the transaction details on the +[SolanaFM explorer](https://solana.fm/). + +The transaction details will show that the custom program was first invoked +(instruction 1), which then invokes the System Program (instruction 1.1), +resulting in a successful SOL transfer. + +![Transaction Details](/assets/docs/core/cpi/transaction-details-pda.png) + +### Example 2 Explanation + +In the example code, the `SolTransfer` struct specifies the accounts required by +the transfer instruction. + +The sender is a PDA that the program must sign for. The `seeds` to derive the +address for the `pda_account` include the hardcoded string "pda" and the address +of the `recipient` account. This means the address for the `pda_account` is +unique for each `recipient`. 
+ +```rust /pda_account/ /recipient/2 /system_program/ +#[derive(Accounts)] +pub struct SolTransfer<'info> { + #[account( + mut, + seeds = [b"pda", recipient.key().as_ref()], + bump, + )] + pda_account: SystemAccount<'info>, + #[account(mut)] + recipient: SystemAccount<'info>, + system_program: Program<'info, System>, +} +``` + +The Javascript equivalent to derive the PDA is included in the test file. + +```ts /pda/ /wallet.publicKey/ +const [PDA] = PublicKey.findProgramAddressSync( + [Buffer.from("pda"), wallet.publicKey.toBuffer()], + program.programId, +); +``` + +The following tabs present two approaches to implementing Cross Program +Invocations (CPIs), each at a different level of abstraction. Both examples are +functionally equivalent. The main purpose is to illustrate the implementation +details of the CPI. + + + + + +The `sol_transfer` instruction included in the example code shows a typical +approach for constructing CPIs using the Anchor framework. + +This approach involves creating a +[`CpiContext`](https://docs.rs/anchor-lang/latest/anchor_lang/context/struct.CpiContext.html), +which includes the `program_id` and accounts required for the instruction being +called, followed by a helper function (`transfer`) to invoke a specific +instruction. 
+ +```rust /cpi_context/ {19} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, + ) + .with_signer(signer_seeds); + + transfer(cpi_context, amount)?; + Ok(()) +} +``` + +When signing with PDAs, the seeds and bump seed are included in the +`cpi_context` as `signer_seeds` using `with_signer()`. The bump seed for a PDA +can be accessed using `ctx.bumps` followed by the name of the PDA account. + +```rust /signer_seeds/ /bump_seed/ {3} +let seed = to_pubkey.key(); +let bump_seed = ctx.bumps.pda_account; +let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + +let cpi_context = CpiContext::new( + program_id, + Transfer { + from: from_pubkey, + to: to_pubkey, + }, +) +.with_signer(signer_seeds); +``` + +The `cpi_context` and `amount` are then passed into the `transfer` function to +execute the CPI. + +```rust +transfer(cpi_context, amount)?; +``` + +When the CPI is processed, the Solana runtime will validate that the provided +seeds and caller program ID derive a valid PDA. The PDA is then added as a +signer on the invocation. This mechanism allows for programs to sign for PDAs +that are derived from their program ID. + + + + +Under the hood, the previous example is a wrapper around the `invoke_signed()` +function which uses +[`system_instruction::transfer`](https://github.com/solana-labs/solana/blob/27eff8408b7223bb3c4ab70523f8a8dca3ca6645/sdk/program/src/system_instruction.rs#L881) +to build the instruction. 
+ +This example shows how to use the `invoke_signed()` function to make a CPI +signed for by a PDA. + +```rust +use anchor_lang::solana_program::{program::invoke_signed, system_instruction}; +``` + +```rust /instruction/1,3 {13} +pub fn sol_transfer(ctx: Context, amount: u64) -> Result<()> { + let from_pubkey = ctx.accounts.pda_account.to_account_info(); + let to_pubkey = ctx.accounts.recipient.to_account_info(); + let program_id = ctx.accounts.system_program.to_account_info(); + + let seed = to_pubkey.key(); + let bump_seed = ctx.bumps.pda_account; + let signer_seeds: &[&[&[u8]]] = &[&[b"pda", seed.as_ref(), &[bump_seed]]]; + + let instruction = + &system_instruction::transfer(&from_pubkey.key(), &to_pubkey.key(), amount); + + invoke_signed(instruction, &[from_pubkey, to_pubkey, program_id], signer_seeds)?; + Ok(()) +} +``` + +This implementation is functionally equivalent to the previous example. The +`signer_seeds` are passed into the `invoke_signed` function. + + + + +Here is a reference program on +[Solana Playground](https://beta.solpg.io/github.com/ZYJLiu/doc-examples/tree/main/cpi-pda) +which includes both examples. diff --git a/docs/programs/anchor/idl.md b/docs/programs/anchor/idl.md new file mode 100644 index 000000000..924980b42 --- /dev/null +++ b/docs/programs/anchor/idl.md @@ -0,0 +1,463 @@ +--- +title: IDL File +description: + Learn about the Interface Description Language (IDL) file in Anchor, its + purpose, benefits, and how it simplifies program-client interactions +sidebarLabel: IDL File +sidebarSortOrder: 2 +--- + +An Interface Description Language (IDL) file provides a standardized JSON file +describing the program's instructions and accounts. This file simplifies the +process of integrating your on-chain program with client applications. 
+ +Key Benefits of the IDL: + +- Standardization: Provides a consistent format for describing the program's + instructions and accounts +- Client Generation: Used to generate client code to interact with the program + +The `anchor build` command generates an IDL file located at +`/target/idl/.json`. + +The code snippets below highlights how the program, IDL, and client relate to +each other. + +## Program Instructions + +The `instructions` array in the IDL corresponds directly to the instructions +defined in your program. It specifies the required accounts and parameters for +each instruction. + + + + + +The program below includes an `initialize` instruction, specifying the accounts +and parameters it requires. + +```rust {8-12, 15-22} +use anchor_lang::prelude::*; + +declare_id!("BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + + + + +The generated IDL file includes the instruction in a standardized JSON format, +including its name, accounts, arguments, and discriminator. 
+ +```json filename="JSON" {11-12, 14-27, 30-33} +{ + "address": "BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + "accounts": [ + { + "name": "new_account", + "writable": true, + "signer": true + }, + { + "name": "signer", + "writable": true, + "signer": true + }, + { + "name": "system_program", + "address": "11111111111111111111111111111111" + } + ], + "args": [ + { + "name": "data", + "type": "u64" + } + ] + } + ], + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ], + "types": [ + { + "name": "NewAccount", + "type": { + "kind": "struct", + "fields": [ + { + "name": "data", + "type": "u64" + } + ] + } + } + ] +} +``` + + + + +The IDL file is then used to generate a client for interacting with the program, +simplifying the process of invoking the program instruction. 
+ +```ts {19-26} +import * as anchor from "@coral-xyz/anchor"; +import { Program, BN } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; +import { Keypair } from "@solana/web3.js"; +import assert from "assert"; + +describe("hello_anchor", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + const wallet = provider.wallet as anchor.Wallet; + const program = anchor.workspace.HelloAnchor as Program; + + it("initialize", async () => { + // Generate keypair for the new account + const newAccountKp = new Keypair(); + + // Send transaction + const data = new BN(42); + const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + }) + .signers([newAccountKp]) + .rpc(); + + // Fetch the created account + const newAccount = await program.account.newAccount.fetch( + newAccountKp.publicKey, + ); + + console.log("Transaction signature: ", transactionSignature); + console.log("On-chain data is:", newAccount.data.toString()); + assert(data.eq(newAccount.data)); + }); +}); +``` + + + + +## Program Accounts + +The `accounts` array in the IDL corresponds to the structs in a program +annotated with the `#[account]` macro. These structs define the data stored in +accounts created by the program. + + + + + +The program below defines a `NewAccount` struct with a single `data` field of +type `u64`. 
+ +```rust {24-27} +use anchor_lang::prelude::*; + +declare_id!("BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + + + + +The generated IDL file includes the account in a standardized JSON format, +including its name, discriminator, and fields. + +```json filename="JSON" {39-40, 45-54} +{ + "address": "BYFW1vhC1ohxwRbYoLbAWs86STa25i9sD5uEusVjTYNd", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + "accounts": [ + { + "name": "new_account", + "writable": true, + "signer": true + }, + { + "name": "signer", + "writable": true, + "signer": true + }, + { + "name": "system_program", + "address": "11111111111111111111111111111111" + } + ], + "args": [ + { + "name": "data", + "type": "u64" + } + ] + } + ], + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ], + "types": [ + { + "name": "NewAccount", + "type": { + "kind": "struct", + "fields": [ + { + "name": "data", + "type": "u64" + } + ] + } + } + ] +} +``` + + + + +The IDL file is then used to generate a client for interacting with the program, +simplifying the process of fetching and deserializing account data. 
+ +```ts {29-31} +import * as anchor from "@coral-xyz/anchor"; +import { Program, BN } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; +import { Keypair } from "@solana/web3.js"; +import assert from "assert"; + +describe("hello_anchor", () => { + const provider = anchor.AnchorProvider.env(); + anchor.setProvider(provider); + const wallet = provider.wallet as anchor.Wallet; + const program = anchor.workspace.HelloAnchor as Program; + + it("initialize", async () => { + // Generate keypair for the new account + const newAccountKp = new Keypair(); + + // Send transaction + const data = new BN(42); + const transactionSignature = await program.methods + .initialize(data) + .accounts({ + newAccount: newAccountKp.publicKey, + signer: wallet.publicKey, + }) + .signers([newAccountKp]) + .rpc(); + + // Fetch the created account + const newAccount = await program.account.newAccount.fetch( + newAccountKp.publicKey, + ); + + console.log("Transaction signature: ", transactionSignature); + console.log("On-chain data is:", newAccount.data.toString()); + assert(data.eq(newAccount.data)); + }); +}); +``` + + + + +## Discriminators + +Anchor assigns a unique 8 byte discriminator to each instruction and account +type in a program. These discriminators serve as identifiers to distinguish +between different instructions or account types. + +The discriminator is generated using the first 8 bytes of the Sha256 hash of a +prefix combined with the instruction or account name. As of Anchor v0.30, these +discriminators are included in the IDL file. + +Note that when working with Anchor, you typically won't need to interact +directly with these discriminators. This section is primarily to provide context +on how the discriminator is generated and used. + + + + + +The instruction discriminator is used by the program to determine which specific +instruction to execute when called. 
+ +When an Anchor program instruction is invoked, the discriminator is included as +the first 8 bytes of the instruction data. This is done automatically by the +Anchor client. + +```json filename="IDL" {4} + "instructions": [ + { + "name": "initialize", + "discriminator": [175, 175, 109, 31, 13, 152, 155, 237], + ... + } + ] +``` + +The discriminator for an instruction is the first 8 bytes of the Sha256 hash of +the prefix `global` plus the instruction name. + +For example: + +``` +sha256("global:initialize") +``` + +Hexadecimal output: + +``` +af af 6d 1f 0d 98 9b ed d4 6a 95 07 32 81 ad c2 1b b5 e0 e1 d7 73 b2 fb bd 7a b5 04 cd d4 aa 30 +``` + +The first 8 bytes are used as the discriminator for the instruction. + +``` +af = 175 +af = 175 +6d = 109 +1f = 31 +0d = 13 +98 = 152 +9b = 155 +ed = 237 +``` + +You can find the implementation of the discriminator generation in the Anchor +codebase +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/codegen/program/common.rs#L5-L19), +which is used +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/codegen/program/instruction.rs#L27). + + + + +The account discriminator is used to identify the specific account type when +deserializing on-chain data and is set when the account is created. + +```json filename="IDL" {4} + "accounts": [ + { + "name": "NewAccount", + "discriminator": [176, 95, 4, 118, 91, 177, 125, 232] + } + ] +``` + +The discriminator for an account is the first 8 bytes of the Sha256 hash of the +prefix `account` plus the account name. + +For example: + +``` +sha256("account:NewAccount") +``` + +Hexadecimal output: + +``` +b0 5f 04 76 5b b1 7d e8 a1 93 57 2a d3 5e b1 ae e5 f0 69 e2 09 7e 5c d2 64 56 55 2a cb 4a e9 57 +``` + +The first 8 bytes are used as the discriminator for the account. 
+ +``` +b0 = 176 +5f = 95 +04 = 4 +76 = 118 +5b = 91 +b1 = 177 +7d = 125 +e8 = 232 +``` + +You can find the implementation of the discriminator generation in the Anchor +codebase +[here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L101-L117). + +Note that different programs using identical account names will generate the +same discriminator. When deserializing account data, Anchor programs will also +check an account is owned by the expected program for a specified account type. + + + diff --git a/docs/programs/anchor/index.md b/docs/programs/anchor/index.md new file mode 100644 index 000000000..7bf499b86 --- /dev/null +++ b/docs/programs/anchor/index.md @@ -0,0 +1,381 @@ +--- +title: Getting Started with Anchor +description: Getting Started with Anchor +sidebarLabel: Anchor Framework +sidebarSortOrder: 1 +altRoutes: + - /docs/programs/debugging + - /docs/programs/lang-c + - /docs/programs/overview +--- + +The Anchor framework is a tool that simplifies the process of building Solana +programs. Whether you're new to blockchain development or an experienced +programmer, Anchor simplifies the process of writing, testing, and deploying +Solana programs. + +In this section, we'll walk through: + +- Creating a new Anchor project +- Building and testing your program +- Deploying to Solana clusters +- Understanding the project file structure + +## Prerequisites + +For detailed installation instructions, visit the +[installation](/docs/intro/installation) page. + +Before you begin, ensure you have the following installed: + +- Rust: The programming language for building Solana programs. +- Solana CLI: Command-line tool for Solana development. +- Anchor CLI: Command-line tool for the Anchor framework. 
+ +To verify Anchor CLI installation, open your terminal and run: + +```shell filename="Terminal" +anchor --version +``` + +Expected output: + +```shell filename="Terminal" +anchor-cli 0.30.1 +``` + +## Getting Started + +This section covers the basic steps to create, build, and test your first local +Anchor program. + + + +### Create a new Project + +To start a new project, use the `anchor init` command followed by your project's +name. This command creates a new directory with the specified name and sets up a +default program and test file. + +```shell filename="Terminal" +anchor init my-program +``` + +Navigate to the new project directory and open it in your code editor. + +```shell filename="Terminal" copy +cd my-project +``` + +The default Anchor program is located at `/programs/my-project/src/lib.rs`. + + + + +The value in the `declare_id!` macro is the program ID, a unique identifier for +your program. + +By default, it is the public key of the keypair generated in +`/target/deploy/my_project-keypair.json`. + +```rs filename="lib.rs" +use anchor_lang::prelude::*; + +declare_id!("3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg"); + +#[program] +pub mod my_project { + use super::*; + + pub fn initialize(ctx: Context) -> Result<()> { + msg!("Greetings from: {:?}", ctx.program_id); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize {} +``` + + + + +The default Typescript test file is located at `/tests/my-project.ts`. + + + + +This file demonstrates how to invoke the default program's `initialize` +instruction in Typescript. + +```ts filename="my-project.ts" +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { MyProject } from "../target/types/my_project"; + +describe("my-project", () => { + // Configure the client to use the local cluster. 
+ anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.MyProject as Program; + + it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods.initialize().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + + + + +If you prefer Rust for testing, initialize your project with the +`--test-template rust` flag. + +```shell +anchor init --test-template rust my-program +``` + +The Rust test file will be at `/tests/src/test_initialize.rs`. + + + + +```rust filename="test_initialize.rs" +use std::str::FromStr; + +use anchor_client::{ + solana_sdk::{ + commitment_config::CommitmentConfig, pubkey::Pubkey, signature::read_keypair_file, + }, + Client, Cluster, +}; + +#[test] +fn test_initialize() { + let program_id = "3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg"; + let anchor_wallet = std::env::var("ANCHOR_WALLET").unwrap(); + let payer = read_keypair_file(&anchor_wallet).unwrap(); + + let client = Client::new_with_options(Cluster::Localnet, &payer, CommitmentConfig::confirmed()); + let program_id = Pubkey::from_str(program_id).unwrap(); + let program = client.program(program_id).unwrap(); + + let tx = program + .request() + .accounts(my_program::accounts::Initialize {}) + .args(my_program::instruction::Initialize {}) + .send() + .expect(""); + + println!("Your transaction signature {}", tx); +} +``` + + + + +### Build the Program + +Build the program by running `anchor build`. + +```shell filename="Terminal" copy +anchor build +``` + +The compiled program will be at `/target/deploy/my_project.so`. The content of +this file is what gets stored on the Solana network (as an executable account) +when you deploy your program. + +### Test the Program + +To test the program, run `anchor test`. + +```shell filename="Terminal" copy +anchor test +``` + +By default, the `Anchor.toml` config file specifies the `localnet` cluster. 
When +developing on `localnet`, `anchor test` will automatically: + +1. Start a local Solana validator +2. Build and deploy your program to the local cluster +3. Run the tests in the `tests` folder +4. Stop the local Solana validator + +Alternatively, you can manually start a local Solana validator and run tests +against it. This is useful if you want to keep the validator running while you +iterate on your program. It allows you to inspect accounts and transaction logs +on the [Solana Explorer](https://explorer.solana.com/?cluster=custom) while +developing locally. + +Open a new terminal and start a local Solana validator by running the +`solana-test-validator` command. + +```shell filename="Terminal" copy +solana-test-validator +``` + +In a separate terminal, run the tests against the local cluster. Use the +`--skip-local-validator` flag to skip starting the local validator since it's +already running. + +```shell filename="Terminal" copy +anchor test --skip-local-validator +``` + +### Deploy to Devnet + +By default, the `Anchor.toml` config file in an Anchor project specifies the +localnet cluster. + +```toml filename="Anchor.toml" {14} +[toolchain] + +[features] +resolution = true +skip-lint = false + +[programs.localnet] +my_program = "3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg" + +[registry] +url = "https://api.apr.dev" + +[provider] +cluster = "Localnet" +wallet = "~/.config/solana/id.json" + +[scripts] +test = "yarn run ts-mocha -p ./tsconfig.json -t 1000000 tests/**/*.ts" +``` + +To deploy your program to devnet, change the `cluster` value to `Devnet`. Note +that this requires your wallet to have enough SOL on Devnet to cover deployment +cost. + +```diff +-cluster = "Localnet" ++cluster = "Devnet" +``` + +```toml filename="Anchor.toml" +[provider] +cluster = "Devnet" +wallet = "~/.config/solana/id.json" +``` + +Now when you run `anchor deploy`, your program will be deployed to the devnet +cluster. 
The `anchor test` command will also use the cluster specified in the
+`Anchor.toml` file.
+
+```shell
+anchor deploy
+```
+
+To deploy to mainnet, simply update the `Anchor.toml` file to specify the
+mainnet cluster.
+
+```toml filename="Anchor.toml"
+[provider]
+cluster = "Mainnet"
+wallet = "~/.config/solana/id.json"
+```
+
+### Update the Program
+
+Solana programs can be updated by redeploying the program to the same program
+ID.
+
+To update a program, simply make changes to your program's code and run the
+`anchor build` command to generate an updated `.so` file.
+
+```shell
+anchor build
+```
+
+Then run the `anchor deploy` command to redeploy the updated program.
+
+```shell
+anchor deploy
+```
+
+### Close the Program
+
+To reclaim the SOL allocated to a program account, you can close your Solana
+program.
+
+To close a program, use the `solana program close <PROGRAM_ID>` command. For
+example:
+
+```shell
+solana program close 3ynNB373Q3VAzKp7m4x238po36hjAGFXFJB4ybN2iTyg --bypass-warning
+```
+
+Note that once a program is closed, the program ID cannot be reused to deploy a
+new program.
+
+
+
+## Project File Structure
+
+Below is an overview of the default file structure in an Anchor workspace:
+
+```
+.
+├── .anchor
+│   └── program-logs
+├── app
+├── migrations
+├── programs
+│   └── [project-name]
+│       └── src
+│           ├── lib.rs
+│           ├── Cargo.toml
+│           └── Xargo.toml
+├── target
+│   ├── deploy
+│   │   └── [project-name]-keypair.json
+│   ├── idl
+│   │   └── [project-name].json
+│   └── types
+│       └── [project-name].ts
+├── tests
+│   └── [project-name].ts
+├── Anchor.toml
+├── Cargo.toml
+└── package.json
+```
+
+### Programs Folder
+
+The `/programs` folder contains your project's Anchor programs. A single
+workspace can contain multiple programs.
+
+### Tests Folder
+
+The `/tests` folder contains test files for your project. A default test file is
+created for you when you create your project.
+
+### Target Folder
+
+The `/target` folder contains build outputs.
The main subfolders include: + +- `/deploy`: Contains the keypair and program binary for your programs. +- `/idl`: Contains the JSON IDL for your programs. +- `/types`: Contains the TypeScript type for the IDL. + +### Anchor.toml File + +The `Anchor.toml` file configures workspace settings for your project. + +### .anchor Folder + +Includes a `program-logs` file that contains transaction logs from the last run +of test files. + +### App Folder + +The `/app` folder is an empty folder that can be optionally used for your +frontend code. diff --git a/docs/programs/anchor/pda.md b/docs/programs/anchor/pda.md new file mode 100644 index 000000000..bec281733 --- /dev/null +++ b/docs/programs/anchor/pda.md @@ -0,0 +1,325 @@ +--- +title: PDAs with Anchor +description: + Learn how to use Program Derived Addresses (PDAs) in Anchor programs, using + constraints, and implementing common PDA patterns +sidebarLabel: PDAs with Anchor +sidebarSortOrder: 4 +--- + +[Program Derived Addresses (PDA)](/docs/core/pda) refer to a feature of Solana +development that allows you to create a unique address derived deterministically +from pre-defined inputs (seeds) and a program ID. + +This section will cover basic examples of how to use PDAs in an Anchor program. + +## Anchor PDA Constraints + +When using PDAs in an Anchor program, you generally use Anchor's account +constraints to define the seeds used to derive the PDA. These constraints serve +as security checks to ensure that the correct address is derived. + +The constraints used to define the PDA seeds include: + +- `seeds`: An array of optional seeds used to derive the PDA. Seeds can be + static values or dynamic references to account data. +- `bump`: The bump seed used to derive the PDA. Used to ensure the address falls + off the Ed25519 curve and is a valid PDA. +- `seeds::program` - (Optional) The program ID used to derive the PDA address. 
+ This constraint is only used to derive a PDA where the program ID is not the + current program. + +The `seeds` and `bump` constraints are required to be used together. + +### Usage Examples + +Below are examples demonstrating how to use PDA constraints in an Anchor +program. + + + + + +The `seeds` constraint specifies the optional values used to derive the PDA. + +#### No Optional Seeds + +- Use an empty array `[]` to define a PDA without optional seeds. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Single Static Seed + +- Specify optional seeds in the `seeds` constraint. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Multiple Seeds and Account References + +- Multiple seeds can be specified in the `seeds` constraint. The `seeds` + constraint can also reference other account addresses or account data. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + pub signer: Signer<'info>, + #[account( + seeds = [b"hello_world", signer.key().as_ref()], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +The example above uses both a static seed (`b"hello_world"`) and a dynamic seed +(the signer's public key). + + + + +The `bump` constraint specifies the bump seed used to derive the PDA. + +#### Automatic Bump Calculation + +When using the `bump` constraint without a value, the bump is automatically +calculated each time the instruction is invoked. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + +#### Specify Bump Value + +You can explicitly provide the bump value, which is useful for optimizing +compute unit usage. 
This assumes that the PDA account has been created and the +bump seed is stored as a field on an existing account. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump = pda_account.bump_seed, + )] + pub pda_account: Account<'info, CustomAccount>, +} + +#[account] +pub struct CustomAccount { + pub bump_seed: u8, +} +``` + +By storing the bump value in the account's data, the program doesn't need to +recalculate it, saving compute units. The saved bump value can be stored on the +account itself or another account. + + + + +The `seeds::program` constraint specifies the program ID used to derive the PDA. +This constraint is only used when deriving a PDA from a different program. + +Use this constraint when your instruction needs to interact with PDA accounts +created by another program. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account( + seeds = [b"hello_world"], + bump, + seeds::program = other_program.key(), + )] + pub pda_account: SystemAccount<'info>, + pub other_program: Program<'info, OtherProgram>, +} +``` + + + + +The `init` constraint is commonly used with `seeds` and `bump` to create a new +account with an address that is a PDA. Under the hood, the `init` constraint +invokes the System Program to create the account. + +```rs +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + #[account(mut)] + pub signer: Signer<'info>, + #[account( + init, + seeds = [b"hello_world", signer.key().as_ref()], + bump, + payer = signer, + space = 8 + 1, + )] + pub pda_account: Account<'info, CustomAccount>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct CustomAccount { + pub bump_seed: u8, +} +``` + + + + +## PDA seeds in the IDL + +Program Derived Address (PDA) seeds defined in the `seeds` constraint are +included in the program's IDL file. 
This allows the Anchor client to +automatically resolve accounts using these seeds when constructing instructions. + +This example below shows the relationship between the program, IDL, and client. + + + + + +The program below defines a `pda_account` using a static seed (`b"hello_world"`) +and the signer's public key as a dynamic seed. + +```rs {18} /signer/ +use anchor_lang::prelude::*; + +declare_id!("BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5"); + +#[program] +mod hello_anchor { + use super::*; + pub fn test_instruction(ctx: Context) -> Result<()> { + msg!("PDA: {}", ctx.accounts.pda_account.key()); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct InstructionAccounts<'info> { + pub signer: Signer<'info>, + #[account( + seeds = [b"hello_world", signer.key().as_ref()], + bump, + )] + pub pda_account: SystemAccount<'info>, +} +``` + + + + +The program's IDL file includes the PDA seeds defined in the `seeds` constraint. + +- The static seed `b"hello_world"` is converted to byte values. +- The dynamic seed is included as reference to the signer account. + +```json {22-29} +{ + "address": "BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5", + "metadata": { + "name": "hello_anchor", + "version": "0.1.0", + "spec": "0.1.0", + "description": "Created with Anchor" + }, + "instructions": [ + { + "name": "test_instruction", + "discriminator": [33, 223, 61, 208, 32, 193, 201, 79], + "accounts": [ + { + "name": "signer", + "signer": true + }, + { + "name": "pda_account", + "pda": { + "seeds": [ + { + "kind": "const", + "value": [104, 101, 108, 108, 111, 95, 119, 111, 114, 108, 100] + }, + { + "kind": "account", + "path": "signer" + } + ] + } + } + ], + "args": [] + } + ] +} +``` + + + + +The Anchor client can automatically resolve the PDA address using the IDL file. + +In the example below, Anchor automatically resolves the PDA address using the +provider wallet as the signer, and its public key as the dynamic seed for PDA +derivation. 
This removes the need to explicitly derive the PDA when building the +instruction. + +```ts {13} +import * as anchor from "@coral-xyz/anchor"; +import { Program } from "@coral-xyz/anchor"; +import { HelloAnchor } from "../target/types/hello_anchor"; + +describe("hello_anchor", () => { + // Configure the client to use the local cluster. + anchor.setProvider(anchor.AnchorProvider.env()); + + const program = anchor.workspace.HelloAnchor as Program; + + it("Is initialized!", async () => { + // Add your test here. + const tx = await program.methods.testInstruction().rpc(); + console.log("Your transaction signature", tx); + }); +}); +``` + +When the instruction is invoked, the PDA is printed to program logs as defined +in the program instruction. + +```{3} +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 invoke [1] +Program log: Instruction: TestInstruction +Program log: PDA: 3Hikt5mpKaSS4UNA5Du1TZJ8tp4o8VC8YWW6X9vtfVnJ +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 consumed 18505 of 200000 compute units +Program BZLiJ62bzRryYp9mRobz47uA66WDgtfTXhhgM25tJyx5 success +``` + + + diff --git a/docs/programs/anchor/program-structure.md b/docs/programs/anchor/program-structure.md new file mode 100644 index 000000000..a8b0f2475 --- /dev/null +++ b/docs/programs/anchor/program-structure.md @@ -0,0 +1,399 @@ +--- +title: Anchor Program Structure +description: + Learn about the structure of Anchor programs, including key macros and their + roles in simplifying Solana program development +sidebarLabel: Program Structure +sidebarSortOrder: 1 +--- + +The [Anchor framework](https://www.anchor-lang.com/) uses +[Rust macros](https://doc.rust-lang.org/book/ch19-06-macros.html) to reduce +boilerplate code and simplify the implementation of common security checks +required for writing Solana programs. 
+
+The main macros found in an Anchor program include:
+
+- [`declare_id`](#declare-id-macro): Specifies the program's on-chain address
+- [`#[program]`](#program-macro): Specifies the module containing the program’s
+  instruction logic
+- [`#[derive(Accounts)]`](#derive-accounts-macro): Applied to structs to
+  indicate a list of accounts required by an instruction
+- [`#[account]`](#account-macro): Applied to structs to create custom account
+  types for the program
+
+## Example Program
+
+Let's examine a simple program that demonstrates the usage of the macros
+mentioned above to understand the basic structure of an Anchor program.
+
+The example program below creates a new account (`NewAccount`) that stores a
+`u64` value passed to the `initialize` instruction.
+
+```rust filename="lib.rs"
+use anchor_lang::prelude::*;
+
+declare_id!("11111111111111111111111111111111");
+
+#[program]
+mod hello_anchor {
+    use super::*;
+    pub fn initialize(ctx: Context<Initialize>, data: u64) -> Result<()> {
+        ctx.accounts.new_account.data = data;
+        msg!("Changed data to: {}!", data);
+        Ok(())
+    }
+}
+
+#[derive(Accounts)]
+pub struct Initialize<'info> {
+    #[account(init, payer = signer, space = 8 + 8)]
+    pub new_account: Account<'info, NewAccount>,
+    #[account(mut)]
+    pub signer: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+
+#[account]
+pub struct NewAccount {
+    data: u64,
+}
+```
+
+## declare_id! macro
+
+The
+[`declare_id`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L430)
+macro specifies the on-chain address of the program, known as the program ID.
+
+```rust filename="lib.rs" {3}
+use anchor_lang::prelude::*;
+
+declare_id!("11111111111111111111111111111111");
+```
+
+By default, the program ID is the public key of the keypair generated at
+`/target/deploy/your_program_name-keypair.json`.
+ +To update the value of the program ID in the `declare_id` macro with the public +key of the keypair in the `/target/deploy/your_program_name.json` file, run the +following command: + +```shell filename="Terminal" +anchor keys sync +``` + +The `anchor keys sync` command is useful to run when cloning a repository where +the value of the program ID in a cloned repo's `declare_id` macro won't match +the one generated when you run `anchor build` locally. + +## #[program] macro + +The +[`#[program]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/program/src/lib.rs#L12) +macro defines the module that contains all the instruction handlers for your +program. Each public function within this module corresponds to an instruction +that can be invoked. + +```rust filename="lib.rs" {5, 8-12} +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +### Instruction Context + +Instruction handlers are functions that define the logic executed when an +instruction is invoked. The first parameter of each handler is a `Context` +type, where `T` is a struct implementing the `Accounts` trait and specifies the +accounts the instruction requires. + +The +[`Context`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/context.rs#L24) +type provides the instruction with access to the following non-argument inputs: + +```rust +pub struct Context<'a, 'b, 'c, 'info, T> { + /// Currently executing program id. 
+    pub program_id: &'a Pubkey,
+    /// Deserialized accounts.
+    pub accounts: &'b mut T,
+    /// Remaining accounts given but not deserialized or validated.
+    /// Be very careful when using this directly.
+    pub remaining_accounts: &'c [AccountInfo<'info>],
+    /// Bump seeds found during constraint validation. This is provided as a
+    /// convenience so that handlers don't have to recalculate bump seeds or
+    /// pass them in as arguments.
+    pub bumps: BTreeMap<String, u8>,
+}
+```
+
+The `Context` fields can be accessed in an instruction using dot notation:
+
+- `ctx.accounts`: The accounts required for the instruction
+- `ctx.program_id`: The program's public key (address)
+- `ctx.remaining_accounts`: Additional accounts not specified in the `Accounts`
+  struct.
+- `ctx.bumps`: Bump seeds for any
+  [Program Derived Address (PDA)](/docs/core/pda.md) accounts specified in the
+  `Accounts` struct
+
+Additional parameters are optional and can be included to specify arguments that
+must be provided when the instruction is invoked.
+
+```rust filename="lib.rs" /Context/ /data/1
+pub fn initialize(ctx: Context<Initialize>, data: u64) -> Result<()> {
+    ctx.accounts.new_account.data = data;
+    msg!("Changed data to: {}!", data);
+    Ok(())
+}
+```
+
+In this example, the `Initialize` struct implements the `Accounts` trait where
+each field in the struct represents an account required by the `initialize`
+instruction.
+
+```rust filename="lib.rs" /Initialize/ /Accounts/
+#[program]
+mod hello_anchor {
+    use super::*;
+    pub fn initialize(ctx: Context<Initialize>, data: u64) -> Result<()> {
+        ctx.accounts.new_account.data = data;
+        msg!("Changed data to: {}!", data);
+        Ok(())
+    }
+}
+
+#[derive(Accounts)]
+pub struct Initialize<'info> {
+    #[account(init, payer = signer, space = 8 + 8)]
+    pub new_account: Account<'info, NewAccount>,
+    #[account(mut)]
+    pub signer: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+## #[derive(Accounts)] macro
+
+The
+[`#[derive(Accounts)]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/derive/accounts/src/lib.rs#L630)
+macro is applied to a struct to specify the accounts that must be provided when
+an instruction is invoked. This macro implements the
+[`Accounts`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/lib.rs#L105)
+trait, which simplifies account validation and serialization and deserialization
+of account data.
+
+```rust /Accounts/ {1}
+#[derive(Accounts)]
+pub struct Initialize<'info> {
+    #[account(init, payer = signer, space = 8 + 8)]
+    pub new_account: Account<'info, NewAccount>,
+    #[account(mut)]
+    pub signer: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+Each field in the struct represents an account required by an instruction. The
+naming of each field is arbitrary, but it is recommended to use a descriptive
+name that indicates the purpose of the account.
+
+```rust /signer/2 /new_account/ /system_program/
+#[derive(Accounts)]
+pub struct Initialize<'info> {
+    #[account(init, payer = signer, space = 8 + 8)]
+    pub new_account: Account<'info, NewAccount>,
+    #[account(mut)]
+    pub signer: Signer<'info>,
+    pub system_program: Program<'info, System>,
+}
+```
+
+### Account Validation
+
+To prevent security vulnerabilities, it's important to verify that accounts
+provided to an instruction are the expected accounts.
Accounts are validated in +Anchor programs in two ways that are generally used together: + +- [Account Constraints](https://www.anchor-lang.com/docs/account-constraints): + Constraints define additional conditions that an account must satisfy to be + considered valid for the instruction. Constraints are applied using the + `#[account(..)]` attribute, which is placed above a field in a struct that + implements the `Accounts` trait. + + You can find the implementation of the constraints + [here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/syn/src/parser/accounts/constraints.rs). + + ```rust {3, 5} + #[derive(Accounts)] + pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +- [Account Types](https://www.anchor-lang.com/docs/account-types): Anchor + provides various account types to help ensure that the account provided by the + client matches what the program expects. + + You can find the implementation of the account types + [here](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/src/accounts). + + ```rust /Account/2 /Signer/ /Program/ + #[derive(Accounts)] + pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, + } + ``` + +When an instruction in an Anchor program is invoked, the program first validates +the accounts provided before executing the instruction's logic. After +validation, these accounts can be accessed within the instruction using the +`ctx.accounts` syntax. 
+ +```rust filename="lib.rs" /ctx.accounts.new_account/ /new_account/ /Initialize/ +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +## #[account] macro + +The +[`#[account]`](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L66) +macro is applied to structs that define the data stored in custom accounts +created by your program. + +```rust +#[account] +pub struct NewAccount { + data: u64, +} +``` + +This macro implements various traits +[detailed here](https://docs.rs/anchor-lang/latest/anchor_lang/attr.account.html). +The key functionalities of the `#[account]` macro include: + +- [Assign Program Owner](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L119-L132): + When creating an account, the program owner of the account is automatically + set to the program specified in `declare_id`. +- [Set Discriminator](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L101-L117): + A unique 8 byte discriminator, specific to the account type, is added as the + first 8 bytes of account data during its initialization. This helps in + differentiating account types and is used for account validation. +- [Data Serialization and Deserialization](https://github.com/coral-xyz/anchor/blob/v0.30.1/lang/attribute/account/src/lib.rs#L202-L246): + Account data is automatically serialized and deserialized as the account type. 
+ +```rust filename="lib.rs" /data/2,6 /NewAccount/ {24-27} +use anchor_lang::prelude::*; + +declare_id!("11111111111111111111111111111111"); + +#[program] +mod hello_anchor { + use super::*; + pub fn initialize(ctx: Context, data: u64) -> Result<()> { + ctx.accounts.new_account.data = data; + msg!("Changed data to: {}!", data); + Ok(()) + } +} + +#[derive(Accounts)] +pub struct Initialize<'info> { + #[account(init, payer = signer, space = 8 + 8)] + pub new_account: Account<'info, NewAccount>, + #[account(mut)] + pub signer: Signer<'info>, + pub system_program: Program<'info, System>, +} + +#[account] +pub struct NewAccount { + data: u64, +} +``` + +### Account Discriminator + +An account discriminator in an Anchor program refers to an 8 byte identifier +unique to each account type. It's derived from the first 8 bytes of the SHA256 +hash of the string `account:`. This discriminator is stored as the +first 8 bytes of account data when an account is created. + +When creating an account in an Anchor program, 8 bytes must be allocated for the +discriminator. + +```rust /8/1 +#[account(init, payer = signer, space = 8 + 8)] +pub new_account: Account<'info, NewAccount>, +``` + +The discriminator is used during the following two scenarios: + +- Initialization: When an account is created, the discriminator is set as the + first 8 bytes of the account's data. +- Deserialization: When account data is deserialized, the first 8 bytes of + account data is checked against the discriminator of the expected account + type. + +If there's a mismatch, it indicates that the client has provided an unexpected +account. This mechanism serves as an account validation check in Anchor +programs. 
diff --git a/docs/programs/debugging.md b/docs/programs/debugging.md deleted file mode 100644 index 1f2ace740..000000000 --- a/docs/programs/debugging.md +++ /dev/null @@ -1,273 +0,0 @@ ---- -title: "Debugging Programs" ---- - -Solana programs run on-chain, so debugging them in the wild can be challenging. -To make debugging programs easier, developers can write unit tests that directly -test their program's execution via the Solana runtime, or run a local cluster -that will allow RPC clients to interact with their program. - -## Running unit tests - -- [Testing with Rust](/docs/programs/lang-rust.md#how-to-test) -- [Testing with C](/docs/programs/lang-c.md#how-to-test) - -## Logging - -During program execution both the runtime and the program log status and error -messages. - -For information about how to log from a program see the language specific -documentation: - -- [Logging from a Rust program](/docs/programs/lang-rust.md#logging) -- [Logging from a C program](/docs/programs/lang-c.md#logging) - -When running a local cluster the logs are written to stdout as long as they are -enabled via the `RUST_LOG` log mask. From the perspective of program development -it is helpful to focus on just the runtime and program logs and not the rest of -the cluster logs. To focus in on program specific information the following log -mask is recommended: - -```shell -export RUST_LOG=solana_runtime::system_instruction_processor=trace,solana_runtime::message_processor=info,solana_bpf_loader=debug,solana_rbpf=debug -``` - -Log messages coming directly from the program (not the runtime) will be -displayed in the form: - -`Program log: ` - -## Error Handling - -The amount of information that can be communicated via a transaction error is -limited but there are many points of possible failures. 
The following are -possible failure points and information about what errors to expect and where to -get more information: - -- The SBF loader may fail to parse the program, this should not happen since the - loader has already _finalized_ the program's account data. - - `InstructionError::InvalidAccountData` will be returned as part of the - transaction error. -- The SBF loader may fail to setup the program's execution environment - - `InstructionError::Custom(0x0b9f_0001)` will be returned as part of the - transaction error. "0x0b9f_0001" is the hexadecimal representation of - [`VirtualMachineCreationFailed`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/programs/bpf_loader/src/lib.rs#L44). -- The SBF loader may have detected a fatal error during program executions - (things like panics, memory violations, system call errors, etc...) - - `InstructionError::Custom(0x0b9f_0002)` will be returned as part of the - transaction error. "0x0b9f_0002" is the hexadecimal representation of - [`VirtualMachineFailedToRunProgram`](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/programs/bpf_loader/src/lib.rs#L46). -- The program itself may return an error - - `InstructionError::Custom()` will be returned. The "user - defined value" must not conflict with any of the - [builtin runtime program errors](https://github.com/solana-labs/solana/blob/bc7133d7526a041d1aaee807b80922baa89b6f90/sdk/program/src/program_error.rs#L87). - Programs typically use enumeration types to define error codes starting at - zero so they won't conflict. - -In the case of `VirtualMachineFailedToRunProgram` errors, more information about -the specifics of what failed are written to the -[program's execution logs](/docs/programs/debugging.md#logging). 
- -For example, an access violation involving the stack will look something like -this: - -```text -SBF program 4uQeVj5tqViQh7yWWGStvkEG1Zmhx6uasJtWCJziofM failed: out of bounds memory store (insn #615), addr 0x200001e38/8 -``` - -## Monitoring Compute Budget Consumption - -The program can log the remaining number of compute units it will be allowed -before program execution is halted. Programs can use these logs to wrap -operations they wish to profile. - -- [Log the remaining compute units from a Rust program](/docs/programs/lang-rust.md#compute-budget) -- [Log the remaining compute units from a C program](/docs/programs/lang-c.md#compute-budget) - -See [compute budget](/docs/core/fees.md#compute-budget) for more information. - -## ELF Dump - -The SBF shared object internals can be dumped to a text file to gain more -insight into a program's composition and what it may be doing at runtime. - -- [Create a dump file of a Rust program](/docs/programs/lang-rust.md#elf-dump) -- [Create a dump file of a C program](/docs/programs/lang-c.md#elf-dump) - -## Instruction Tracing - -During execution the runtime SBF interpreter can be configured to log a trace -message for each SBF instruction executed. This can be very helpful for things -like pin-pointing the runtime context leading up to a memory access violation. - -The trace logs together with the [ELF dump](#elf-dump) can provide a lot of -insight (though the traces produce a lot of information). - -To turn on SBF interpreter trace messages in a local cluster configure the -`solana_rbpf` level in `RUST_LOG` to `trace`. For example: - -`export RUST_LOG=solana_rbpf=trace` - -## Source level debugging - -Source level debugging of onchain programs written in Rust or C can be done -using the `program run` subcommand of `solana-ledger-tool`, and lldb, -distributed with Solana Rust and Clang compiler binary package platform-tools. 
- -The `solana-ledger-tool program run` subcommand loads a compiled on-chain -program, executes it in RBPF virtual machine and runs a gdb server that accepts -incoming connections from LLDB or GDB. Once lldb is connected to -`solana-ledger-tool` gdbserver, it can control execution of an on-chain program. -Run `solana-ledger-tool program run --help` for an example of specifying input -data for parameters of the program entrypoint function. - -To compile a program for debugging use cargo-build-sbf build utility with the -command line option `--debug`. The utility will generate two loadable files, one -a usual loadable module with the extension `.so`, and another the same loadable -module but containing Dwarf debug information, a file with extension `.debug`. - -To execute a program in debugger, run `solana-ledger-tool program run` with -`-e debugger` command line option. For example, a crate named 'helloworld' is -compiled and an executable program is built in `target/deploy` directory. There -should be three files in that directory - -- helloworld-keypair.json -- a keypair for deploying the program, -- helloworld.debug -- a binary file containing debug information, -- helloworld.so -- an executable file loadable into the virtual machine. The - command line for running `solana-ledger-tool` would be something like this - -```shell -solana-ledger-tool program run -l test-ledger -e debugger target/deploy/helloworld.so -``` - -Note that `solana-ledger-tool` always loads a ledger database. Most on-chain -programs interact with a ledger in some manner. Even if for debugging purpose a -ledger is not needed, it has to be provided to `solana-ledger-tool`. A minimal -ledger database can be created by running `solana-test-validator`, which creates -a ledger in `test-ledger` subdirectory. 
- -In debugger mode `solana-ledger-tool program run` loads an `.so` file and starts -listening for an incoming connection from a debugger - -```text -Waiting for a Debugger connection on "127.0.0.1:9001"... -``` - -To connect to `solana-ledger-tool` and execute the program, run lldb. For -debugging rust programs it may be beneficial to run solana-lldb wrapper to lldb, -i.e. at a new shell prompt (other than the one used to start -`solana-ledger-tool`) run the command: - -```shell -solana-lldb -``` - -This script is installed in platform-tools path. If that path is not added to -`PATH` environment variable, it may be necessary to specify the full path, e.g. - -```text -~/.cache/solana/v1.35/platform-tools/llvm/bin/solana-lldb -``` - -After starting the debugger, load the .debug file by entering the following -command at the debugger prompt - -```text -(lldb) file target/deploy/helloworld.debug -``` - -If the debugger finds the file, it will print something like this - -```text -Current executable set to '/path/helloworld.debug' (bpf). -``` - -Now, connect to the gdb server that `solana-ledger-tool` implements, and debug -the program as usual. Enter the following command at lldb prompt - -```text -(lldb) gdb-remote 127.0.0.1:9001 -``` - -If the debugger and the gdb server establish a connection, the execution of the -program will be stopped at the entrypoint function, and lldb should print -several lines of the source code around the entrypoint function signature. From -this point on, normal lldb commands can be used to control execution of the -program being debugged. - -### Debugging in an IDE - -To debug onchain programs in Visual Studio IDE, install the CodeLLDB extension. -Open CodeLLDB Extension Settings. In Advanced settings change the value of -`Lldb: Library` field to the path of `liblldb.so` (or liblldb.dylib on macOS). 
-For example on Linux a possible path to Solana customized lldb can be -`/home//.cache/solana/v1.33/platform-tools/llvm/lib/liblldb.so.` where -`` is your Linux system username. This can also be added directly to -`~/.config/Code/User/settings.json` file, e.g. - -```json -{ - "lldb.library": "/home//.cache/solana/v1.35/platform-tools/llvm/lib/liblldb.so" -} -``` - -In `.vscode` subdirectory of your on-chain project, create two files - -First file is `tasks.json` with the following content - -```json -{ - "version": "2.0.0", - "tasks": [ - { - "label": "build", - "type": "shell", - "command": "cargo build-sbf --debug", - "problemMatcher": [], - "group": { - "kind": "build", - "isDefault": true - } - }, - { - "label": "solana-debugger", - "type": "shell", - "command": "solana-ledger-tool program run -l test-ledger -e debugger ${workspaceFolder}/target/deploy/helloworld.so" - } - ] -} -``` - -The first task is to build the on-chain program using cargo-build-sbf utility. -The second task is to run `solana-ledger-tool program run` in debugger mode. - -Another file is `launch.json` with the following content - -```json -{ - "version": "0.2.0", - "configurations": [ - { - "type": "lldb", - "request": "custom", - "name": "Debug", - "targetCreateCommands": [ - "target create ${workspaceFolder}/target/deploy/helloworld.debug" - ], - "processCreateCommands": ["gdb-remote 127.0.0.1:9001"] - } - ] -} -``` - -This file specifies how to run debugger and to connect it to the gdb server -implemented by `solana-ledger-tool`. - -To start debugging a program, first build it by running the build task. The next -step is to run `solana-debugger` task. The tasks specified in `tasks.json` file -are started from `Terminal >> Run Task...` menu of VSCode. When -`solana-ledger-tool` is running and listening from incoming connections, it's -time to start the debugger. Launch it from VSCode `Run and Debug` menu. 
If -everything is set up correctly, VSCode will start a debugging session and the -program execution should stop on the entrance into the `entrypoint` function. diff --git a/docs/programs/deploying.md b/docs/programs/deploying.md index 78aa89fc9..394c4de7b 100644 --- a/docs/programs/deploying.md +++ b/docs/programs/deploying.md @@ -4,6 +4,7 @@ description: "Deploying onchain programs can be done using the Solana CLI using the Upgradable BPF loader to upload the compiled byte-code to the Solana blockchain." +sidebarSortOrder: 3 --- Solana onchain programs (otherwise known as "smart contracts") are stored in @@ -140,19 +141,6 @@ on developers who deploy their own programs since [program accounts](/docs/core/accounts.md#custom-programs) are among the largest we typically see on Solana. -#### Example of how much data is used for programs - -As a data point of the number of accounts and potential data stored on-chain, -below is the distribution of the largest accounts (at least 100KB) at slot -`103,089,804` on `mainnet-beta` by assigned on-chain program: - -1. **Serum Dex v3**: 1798 accounts -2. **Metaplex Candy Machine**: 1089 accounts -3. **Serum Dex v2**: 864 accounts -4. **Upgradeable BPF Program Loader**: 824 accounts -5. **BPF Program Loader v2**: 191 accounts -6. **BPF Program Loader v1**: 150 accounts - ### Reclaiming buffer accounts Buffer accounts are used by the Upgradeable BPF loader to temporarily store @@ -165,9 +153,6 @@ account, developers might retry their deployment with a new buffer and not realize that they stored a good chunk of SOL in a forgotten buffer account from an earlier deploy. -> As of slot `103,089,804` on `mainnet-beta` there are 276 abandoned buffer -> accounts that could be reclaimed! 
- Developers can check if they own any abandoned buffer accounts by using the Solana CLI: diff --git a/docs/programs/examples.md b/docs/programs/examples.md index bd70d765d..1d0c969b9 100644 --- a/docs/programs/examples.md +++ b/docs/programs/examples.md @@ -1,5 +1,4 @@ --- -date: 2024-04-26T00:00:00Z title: "Program Examples" description: "A list of Solana program examples in different languages and frameworks, @@ -24,10 +23,11 @@ keywords: - blockchain tutorial - web3 developer - anchor +sidebarSortOrder: 2 --- The -"[Solana Program Examples](https://github.com/solana-developers/program-examples)" +[Solana Program Examples](https://github.com/solana-developers/program-examples) repository on GitHub offers several subfolders, each containing code examples for different Solana programming paradigms and languages, designed to help developers learn and experiment with Solana blockchain development. @@ -35,9 +35,8 @@ developers learn and experiment with Solana blockchain development. You can find the examples in the `solana-developers/program-examples` together with README files that explain you how to run the different examples. Most examples are self-contained and are available in native Rust (ie, with no -framework), [Anchor](https://www.anchor-lang.com/docs/installation), -[Seahorse](https://seahorse-lang.org/) and it also contains a list of examples -that we would love to +framework) and [Anchor](https://www.anchor-lang.com/docs/installation). It also +contains a list of examples that we would love to [see as contributions](https://github.com/solana-developers/program-examples?tab=readme-ov-file#examples-wed-love-to-see). Within the repo you will find the following subfolder, each with assorted example programs within them: @@ -56,22 +55,22 @@ Contains a series of examples that demonstrate the foundational steps for building Solana programs using native Rust libraries. 
These examples are designed to help developers understand the core concepts of Solana programming. -| Example Name | Description | Language | -| ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | ----------------------------------- | -| [Account Data](https://github.com/solana-developers/program-examples/tree/main/basics/account-data) | Saving an address with name, house number, street and city in an account. | Native, Anchor | -| [Checking Accounts](https://github.com/solana-developers/program-examples/tree/main/basics/checking-accounts) | Security lessons that shows how to do account checks | Native, Anchor | -| [Close Account](https://github.com/solana-developers/program-examples/tree/main/basics/close-account) | Show you how to close accounts to get its rent back. | Native, Anchor | -| [Counter](https://github.com/solana-developers/program-examples/tree/main/basics/counter) | A simple counter program in all the different architectures. | Native, Anchor, Seahorse, mpl-stack | -| [Create Account](https://github.com/solana-developers/program-examples/tree/main/basics/create-account) | How to create a system account within a program. | Native, Anchor | -| [Cross Program Invocation](https://github.com/solana-developers/program-examples/tree/main/basics/cross-program-invocation) | Using a hand and lever analogy this shows you how to call another program from within a program. | Native, Anchor | -| [hello solana](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana) | Hello world example which just prints hello world in the transaction logs. | Native, Anchor | -| [Pda Rent payer](https://github.com/solana-developers/program-examples/tree/main/basics/pda-rent-payer) | Shows you how you can use the lamports from a PDA to pay for a new account. 
| Native, Anchor | -| [Processing Instructions](https://github.com/solana-developers/program-examples/tree/main/basics/processing-instructions) | Shows you how to handle instruction data string and u32. | Native, Anchor | -| [Program Derived Addresses](https://github.com/solana-developers/program-examples/tree/main/basics/program-derived-addresses) | Shows how to use seeds to refer to a PDA and save data in it. | Native, Anchor | -| [Realloc](https://github.com/solana-developers/program-examples/tree/main/basics/realloc) | Shows you how to increase and decrease the size of an existing account. | Native, Anchor | -| [Rent](https://github.com/solana-developers/program-examples/tree/main/basics/rent) | Here you will learn how to calculate rent requirements within a program. | Native, Anchor | -| [Repository Layout](https://github.com/solana-developers/program-examples/tree/main/basics/repository-layout) | Recommendations on how to structure your program layout. | Native, Anchor | -| [Transfer SOL](https://github.com/solana-developers/program-examples/tree/main/basics/transfer-sol) | Different methods of transferring SOL for system accounts and PDAs. | Native, Anchor, Seahorse | +| Example Name | Description | Language | +| ----------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------ | ------------------------- | +| [Account Data](https://github.com/solana-developers/program-examples/tree/main/basics/account-data) | Saving an address with name, house number, street and city in an account. 
| Native, Anchor | +| [Checking Accounts](https://github.com/solana-developers/program-examples/tree/main/basics/checking-accounts) | Security lessons that shows how to do account checks | Native, Anchor | +| [Close Account](https://github.com/solana-developers/program-examples/tree/main/basics/close-account) | Show you how to close accounts to get its rent back. | Native, Anchor | +| [Counter](https://github.com/solana-developers/program-examples/tree/main/basics/counter) | A simple counter program in all the different architectures. | Native, Anchor, mpl-stack | +| [Create Account](https://github.com/solana-developers/program-examples/tree/main/basics/create-account) | How to create a system account within a program. | Native, Anchor | +| [Cross Program Invocation](https://github.com/solana-developers/program-examples/tree/main/basics/cross-program-invocation) | Using a hand and lever analogy this shows you how to call another program from within a program. | Native, Anchor | +| [hello solana](https://github.com/solana-developers/program-examples/tree/main/basics/hello-solana) | Hello world example which just prints hello world in the transaction logs. | Native, Anchor | +| [Pda Rent payer](https://github.com/solana-developers/program-examples/tree/main/basics/pda-rent-payer) | Shows you how you can use the lamports from a PDA to pay for a new account. | Native, Anchor | +| [Processing Instructions](https://github.com/solana-developers/program-examples/tree/main/basics/processing-instructions) | Shows you how to handle instruction data string and u32. | Native, Anchor | +| [Program Derived Addresses](https://github.com/solana-developers/program-examples/tree/main/basics/program-derived-addresses) | Shows how to use seeds to refer to a PDA and save data in it. | Native, Anchor | +| [Realloc](https://github.com/solana-developers/program-examples/tree/main/basics/realloc) | Shows you how to increase and decrease the size of an existing account. 
| Native, Anchor | +| [Rent](https://github.com/solana-developers/program-examples/tree/main/basics/rent) | Here you will learn how to calculate rent requirements within a program. | Native, Anchor | +| [Repository Layout](https://github.com/solana-developers/program-examples/tree/main/basics/repository-layout) | Recommendations on how to structure your program layout. | Native, Anchor | +| [Transfer SOL](https://github.com/solana-developers/program-examples/tree/main/basics/transfer-sol) | Different methods of transferring SOL for system accounts and PDAs. | Native, Anchor, Seahorse | ## Compression @@ -89,9 +88,9 @@ focused on compressed NFTs (cNFTs). Oracles allow to use off chain data in programs. -| Example Name | Description | Language | -| ------------------------------------------------------------------------------------ | --------------------------------------------------------------- | ---------------- | -| [Pyth](https://github.com/solana-developers/program-examples/tree/main/oracles/pyth) | Pyth makes price data of tokens available in on chain programs. | Anchor, Seahorse | +| Example Name | Description | Language | +| ------------------------------------------------------------------------------------ | --------------------------------------------------------------- | -------- | +| [Pyth](https://github.com/solana-developers/program-examples/tree/main/oracles/pyth) | Pyth makes price data of tokens available in on chain programs. | Anchor | ## Tokens @@ -99,15 +98,15 @@ Most tokens on Solana use the Solana Program Library (SPL) token standard. Here you can find many examples on how to mint, transfer, burn tokens and even how to interact with them in programs. 
-| Example Name | Description | Language | -| --------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | ------------------------ | -| [Create Token](https://github.com/solana-developers/program-examples/tree/main/tokens/create-token) | How to create a token and add metaplex metadata to it. | Anchor, Native | -| [NFT Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/nft-minter) | Minting only one amount of a token and then removing the mint authority. | Anchor, Native | -| [PDA Mint Authority](https://github.com/solana-developers/program-examples/tree/main/tokens/pda-mint-authority) | Shows you how to change the mint authority of a mint, to mint tokens from within a program. | Anchor, Native | -| [SPL Token Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/spl-token-minter) | Explains how to use Associated Token Accounts to be able to keep track of token accounts. | Anchor, Native | -| [Token Swap](https://github.com/solana-developers/program-examples/tree/main/tokens/token-swap) | Extensive example that shows you how to build a AMM (automated market maker) pool for SPL tokens. | Anchor | -| [Transfer Tokens](https://github.com/solana-developers/program-examples/tree/main/tokens/transfer-tokens) | Shows how to transfer SPL token using CPIs into the token program. | Anchor, Native, Seahorse | -| [Token-2022](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022) | See Token 2022 (Token extensions). 
| Anchor, Native | +| Example Name | Description | Language | +| --------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------- | -------------- | +| [Create Token](https://github.com/solana-developers/program-examples/tree/main/tokens/create-token) | How to create a token and add metaplex metadata to it. | Anchor, Native | +| [NFT Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/nft-minter) | Minting only one amount of a token and then removing the mint authority. | Anchor, Native | +| [PDA Mint Authority](https://github.com/solana-developers/program-examples/tree/main/tokens/pda-mint-authority) | Shows you how to change the mint authority of a mint, to mint tokens from within a program. | Anchor, Native | +| [SPL Token Minter](https://github.com/solana-developers/program-examples/tree/main/tokens/spl-token-minter) | Explains how to use Associated Token Accounts to be able to keep track of token accounts. | Anchor, Native | +| [Token Swap](https://github.com/solana-developers/program-examples/tree/main/tokens/token-swap) | Extensive example that shows you how to build a AMM (automated market maker) pool for SPL tokens. | Anchor | +| [Transfer Tokens](https://github.com/solana-developers/program-examples/tree/main/tokens/transfer-tokens) | Shows how to transfer SPL token using CPIs into the token program. | Anchor, Native | +| [Token-2022](https://github.com/solana-developers/program-examples/tree/main/tokens/token-2022) | See Token 2022 (Token extensions). 
| Anchor, Native | ## Token 2022 (Token Extensions) diff --git a/docs/programs/faq.md b/docs/programs/faq.md index a5dd87b8c..c478fa37d 100644 --- a/docs/programs/faq.md +++ b/docs/programs/faq.md @@ -1,23 +1,10 @@ --- title: "FAQ" +sidebarSortOrder: 7 --- -When writing or interacting with Solana programs, there are common questions or -challenges that often come up. Below are resources to help answer these -questions. - -If not addressed here, ask on -[StackExchange](https://solana.stackexchange.com/questions/ask?tags=solana-program) -with the `solana-program` tag. - -## Limitations - -Developing programs on the Solana blockchain have some inherent limitation -associated with them. Below is a list of common limitation that you may run -into. - -See [limitations of developing programs](/docs/programs/limitations.md) for more -details +Post your questions on +[StackExchange](https://solana.stackexchange.com/questions/ask). ## Berkeley Packet Filter (BPF) @@ -77,20 +64,11 @@ Some instructions require the account to be a signer; this error is returned if an account is expected to be signed but is not. An implementation of a program might also cause this error when performing a -cross-program invocation that requires a signed program address, but the passed -signer seeds passed to [`invoke_signed`](/docs/core/cpi.md) don't match the +[cross-program invocation](/docs/core/cpi.md) that requires a signed program +address, but the passed signer seeds passed to `invoke_signed` don't match the signer seeds used to create the program address [`create_program_address`](/docs/core/pda.md#createprogramaddress). -## `rand` Rust dependency causes compilation failure - -See -[Rust Project Dependencies](/docs/programs/lang-rust.md#project-dependencies) - -## Rust restrictions - -See [Rust restrictions](/docs/programs/lang-rust.md#restrictions) - ## Stack SBF uses stack frames instead of a variable stack pointer. 
Each stack frame is @@ -106,7 +84,7 @@ Error: Function _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h17 ``` The message identifies which symbol is exceeding its stack frame, but the name -might be mangled if it is a Rust or C++ symbol. +might be mangled. > To demangle a Rust symbol use [rustfilt](https://github.com/luser/rustfilt). @@ -117,8 +95,6 @@ rustfilt _ZN16curve25519_dalek7edwards21EdwardsBasepointTable6create17h178b3d241 curve25519_dalek::edwards::EdwardsBasepointTable::create ``` -To demangle a C++ symbol use `c++filt` from binutils. - The reason a warning is reported rather than an error is because some dependent crates may include functionality that violates the stack frame restrictions even if the program doesn't use that functionality. If the program violates the stack @@ -128,16 +104,16 @@ SBF stack frames occupy a virtual address range starting at `0x200000000`. ## Heap size -Programs have access to a runtime heap either directly in C or via the Rust -`alloc` APIs. To facilitate fast allocations, a simple 32KB bump heap is -utilized. The heap does not support `free` or `realloc` so use it wisely. +Programs have access to a runtime heap via the Rust `alloc` APIs. To facilitate +fast allocations, a simple 32KB bump heap is utilized. The heap does not support +`free` or `realloc`. Internally, programs have access to the 32KB memory region starting at virtual address 0x300000000 and may implement a custom heap based on the program's specific needs. 
-- [Rust program heap usage](/docs/programs/lang-rust.md#heap) -- [C program heap usage](/docs/programs/lang-c.md#heap) +Rust programs implement the heap directly by defining a custom +[`global_allocator`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/entrypoint.rs#L72) ## Loaders @@ -157,11 +133,7 @@ For all practical purposes program should always be written to target the latest BPF loader and the latest loader is the default for the command-line interface and the javascript APIs. -For language specific information about implementing a program for a particular -loader see: - - [Rust program entrypoints](/docs/programs/lang-rust.md#program-entrypoint) -- [C program entrypoints](/docs/programs/lang-c.md#program-entrypoint) ### Deployment @@ -193,10 +165,7 @@ results in various parameters falling on aligned offsets within the aligned byte array. This allows deserialization implementations to directly reference the byte array and provide aligned pointers to the program. -For language specific information about serialization see: - - [Rust program parameter deserialization](/docs/programs/lang-rust.md#parameter-deserialization) -- [C program parameter deserialization](/docs/programs/lang-c.md#parameter-deserialization) The latest loader serializes the program input parameters as follows (all encoding is little endian): diff --git a/docs/programs/index.md b/docs/programs/index.md index 6a09d28ff..4189752bd 100644 --- a/docs/programs/index.md +++ b/docs/programs/index.md @@ -1,5 +1,5 @@ --- title: Developing Programs +sidebarSortOrder: 2 metaOnly: true -sidebarSortOrder: 4 --- diff --git a/docs/programs/lang-c.md b/docs/programs/lang-c.md deleted file mode 100644 index 213cdce4c..000000000 --- a/docs/programs/lang-c.md +++ /dev/null @@ -1,191 +0,0 @@ ---- -title: "Developing with C" ---- - -Solana supports writing onchain programs using the C and C++ programming -languages. 
- -## Project Layout - -C projects are laid out as follows: - -```text -/src/ -/makefile -``` - -The `makefile` should contain the following: - -```shell -OUT_DIR := -include ~/.local/share/solana/install/active_release/bin/sdk/sbf/c/sbf.mk -``` - -The sbf-sdk may not be in the exact place specified above but if you setup your -environment per [How to Build](#how-to-build) then it should be. - -## How to Build - -First setup the environment: - -- Install the latest Rust stable from https://rustup.rs -- Install the latest [Solana command-line tools](/docs/intro/installation.md) - -Then build using make: - -```shell -make -C -``` - -## How to Test - -Solana uses the [Criterion](https://github.com/Snaipe/Criterion) test framework -and tests are executed each time the program is built -[How to Build](#how-to-build). - -To add tests, create a new file next to your source file named -`test_.c` and populate it with criterion test cases. See the -[Criterion docs](https://criterion.readthedocs.io/en/master) for information on -how to write a test case. - -## Program Entrypoint - -Programs export a known entrypoint symbol which the Solana runtime looks up and -calls when invoking a program. Solana supports multiple versions of the SBF -loader and the entrypoints may vary between them. Programs must be written for -and deployed to the same loader. For more details see the -[FAQ section on Loaders](/docs/programs/faq.md#loaders). - -Currently there are two supported loaders -[SBF Loader](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader.rs#L17) -and -[SBF loader deprecated](https://github.com/solana-labs/solana/blob/7ddf10e602d2ed87a9e3737aa8c32f1db9f909d8/sdk/program/src/bpf_loader_deprecated.rs#L14). 
- -They both have the same raw entrypoint definition, the following is the raw -symbol that the runtime looks up and calls: - -```c -extern uint64_t entrypoint(const uint8_t *input) -``` - -This entrypoint takes a generic byte array which contains the serialized program -parameters (program id, accounts, instruction data, etc...). To deserialize the -parameters each loader contains its own [helper function](#serialization). - -### Serialization - -Each loader provides a helper function that deserializes the program's input -parameters into C types: - -- [SBF Loader deserialization](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/bpf/c/inc/solana_sdk.h#L304) -- [SBF Loader deprecated deserialization](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/bpf/c/inc/deserialize_deprecated.h#L25) - -Some programs may want to perform deserialization themselves, and they can by -providing their own implementation of the [raw entrypoint](#program-entrypoint). -Take note that the provided deserialization functions retain references back to -the serialized byte array for variables that the program is allowed to modify -(lamports, account data). The reason for this is that upon return the loader -will read those modifications so they may be committed. If a program implements -their own deserialization function they need to ensure that any modifications -the program wishes to commit must be written back into the input byte array. - -Details on how the loader serializes the program inputs can be found in the -[Input Parameter Serialization](https://solana.com/docs/programs/faq#input-parameter-serialization) -docs. 
- -## Data Types - -The loader's deserialization helper function populates the -[SolParameters](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L276) -structure: - -```c -/** - * Structure that the program's entrypoint input data is deserialized into. - */ -typedef struct { - SolAccountInfo* ka; /** Pointer to an array of SolAccountInfo, must already - point to an array of SolAccountInfos */ - uint64_t ka_num; /** Number of SolAccountInfo entries in `ka` */ - const uint8_t *data; /** pointer to the instruction data */ - uint64_t data_len; /** Length in bytes of the instruction data */ - const SolPubkey *program_id; /** program_id of the currently executing program */ -} SolParameters; -``` - -'ka' is an ordered array of the accounts referenced by the instruction and -represented as a -[SolAccountInfo](https://github.com/solana-labs/solana/blob/8415c22b593f164020adc7afe782e8041d756ddf/sdk/sbf/c/inc/solana_sdk.h#L173) -structures. An account's place in the array signifies its meaning, for example, -when transferring lamports an instruction may define the first account as the -source and the second as the destination. - -The members of the `SolAccountInfo` structure are read-only except for -`lamports` and `data`. Both may be modified by the program in accordance with -the "runtime enforcement policy". When an instruction reference the same account -multiple times there may be duplicate `SolAccountInfo` entries in the array but -they both point back to the original input byte array. A program should handle -these cases delicately to avoid overlapping read/writes to the same buffer. If a -program implements their own deserialization function care should be taken to -handle duplicate accounts appropriately. - -`data` is the general purpose byte array from the -[instruction's instruction data](/docs/core/transactions.md#instruction) being -processed. 
- -`program_id` is the public key of the currently executing program. - -## Heap - -C programs can allocate memory via the system call -[`calloc`](https://github.com/solana-labs/solana/blob/c3d2d2134c93001566e1e56f691582f379b5ae55/sdk/sbf/c/inc/solana_sdk.h#L245) -or implement their own heap on top of the 32KB heap region starting at virtual -address x300000000. The heap region is also used by `calloc` so if a program -implements their own heap it should not also call `calloc`. - -## Logging - -The runtime provides two system calls that take data and log it to the program -logs. - -- [`sol_log(const char*)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L128) -- [`sol_log_64(uint64_t, uint64_t, uint64_t, uint64_t, uint64_t)`](https://github.com/solana-labs/solana/blob/d2ee9db2143859fa5dc26b15ee6da9c25cc0429c/sdk/sbf/c/inc/solana_sdk.h#L134) - -The [debugging](/docs/programs/debugging.md#logging) section has more -information about working with program logs. - -## Compute Budget - -Use the system call `sol_remaining_compute_units()` to return a `u64` indicating -the number of compute units remaining for this transaction. - -Use the system call -[`sol_log_compute_units()`](https://github.com/solana-labs/solana/blob/d3a3a7548c857f26ec2cb10e270da72d373020ec/sdk/sbf/c/inc/solana_sdk.h#L140) -to log a message containing the remaining number of compute units the program -may consume before execution is halted - -See the [Compute Budget](/docs/core/fees.md#compute-budget) documentation for -more information. - -## ELF Dump - -The SBF shared object internals can be dumped to a text file to gain more -insight into a program's composition and what it may be doing at runtime. The -dump will contain both the ELF information as well as a list of all the symbols -and the instructions that implement them. 
Some of the SBF loader's error log -messages will reference specific instruction numbers where the error occurred. -These references can be looked up in the ELF dump to identify the offending -instruction and its context. - -To create a dump file: - -```shell -cd -make dump_ -``` - -## Examples - -The -[Solana Program Library github](https://github.com/solana-labs/solana-program-library/tree/master/examples/c) -repo contains a collection of C examples diff --git a/docs/programs/lang-rust.md b/docs/programs/lang-rust.md index 9a6a8cefa..503ceaccb 100644 --- a/docs/programs/lang-rust.md +++ b/docs/programs/lang-rust.md @@ -1,22 +1,14 @@ --- title: "Developing with Rust" +sidebarSortOrder: 4 --- Solana supports writing onchain programs using the [Rust](https://www.rust-lang.org/) programming language. - - -To quickly get started with Solana development and build your first Rust -program, take a look at these detailed quick start guides: - -- [Build and deploy your first Solana program using only your browser](/content/guides/getstarted/hello-world-in-your-browser.md). - No installation needed. - [Setup your local environment](/docs/intro/installation) and use the local test validator. - - ## Project Layout Solana Rust programs follow the typical @@ -295,14 +287,10 @@ can emulate `println!` by using `format!`: msg!("Some variable: {:?}", variable); ``` -The [debugging](/docs/programs/debugging.md#logging) section has more -information about working with program logs the [Rust examples](#examples) -contains a logging example. - ## Panicking Rust's `panic!`, `assert!`, and internal panic results are printed to the -[program logs](/docs/programs/debugging.md#logging) by default. +program logs by default. 
```shell INFO solana_runtime::message_processor] Finalized account CGLhHSuWsp1gT4B7MY2KACqp9RUwQRhcUFfVSuxpSajZ diff --git a/docs/programs/limitations.md b/docs/programs/limitations.md index 1a4469669..1ec59ccc3 100644 --- a/docs/programs/limitations.md +++ b/docs/programs/limitations.md @@ -1,5 +1,6 @@ --- title: "Limitations" +sidebarSortOrder: 6 --- Developing programs on the Solana blockchain have some inherent limitation @@ -12,9 +13,35 @@ Since Rust based onchain programs must run be deterministic while running in a resource-constrained, single-threaded environment, they have some limitations on various libraries. -See -[Developing with Rust - Restrictions](/docs/programs/lang-rust.md#restrictions) -for a detailed breakdown these restrictions and limitations. +On-chain Rust programs support most of Rust's libstd, libcore, and liballoc, as +well as many 3rd party crates. + +There are some limitations since these programs run in a resource-constrained, +single-threaded environment, as well as being deterministic: + +- No access to + - `rand` + - `std::fs` + - `std::net` + - `std::future` + - `std::process` + - `std::sync` + - `std::task` + - `std::thread` + - `std::time` +- Limited access to: + - `std::hash` + - `std::os` +- Bincode is extremely computationally expensive in both cycles and call depth + and should be avoided +- String formatting should be avoided since it is also computationally + expensive. +- No support for `println!`, `print!`, use the + [`msg!`](https://github.com/solana-labs/solana/blob/d9b0fc0e3eec67dfe4a97d9298b15969b2804fab/sdk/program/src/log.rs#L33) + macro instead. +- The runtime enforces a limit on the number of instructions a program can + execute during the processing of one instruction. See + [computation budget](/docs/core/fees.md#compute-budget) for more information. ## Compute budget @@ -82,6 +109,4 @@ added to support writable data. 
## Signed division -The SBF instruction set does not support -[signed division](https://www.kernel.org/doc/html/latest/bpf/bpf_design_QA.Html#q-why-there-is-no-bpf-sdiv-for-signed-divide-operation). -Adding a signed division instruction is a consideration. +The SBF instruction set does not support signed division. diff --git a/docs/programs/overview.md b/docs/programs/overview.md deleted file mode 100644 index 3e937a2c1..000000000 --- a/docs/programs/overview.md +++ /dev/null @@ -1,105 +0,0 @@ ---- -title: Overview of Developing On-chain Programs -sidebarLabel: Overview -sidebarSortOrder: 0 -altRoutes: - - /docs/programs ---- - -Developers can write and deploy their own programs to the Solana blockchain. -This process can be broadly summarized into a few key steps. - - - -To quickly get started with Solana development and build your first Rust -program, take a look at these detailed quick start guides: - -- [Build and deploy your first Solana program using only your browser](/content/guides/getstarted/hello-world-in-your-browser.md). - No installation needed. -- [Setup your local environment](/docs/intro/installation) and use the local - test validator. - - - -## On-chain program development lifecycle - -1. Setup your development environment -2. Write your program -3. Compile the program -4. Generate the program's public address -5. Deploy the program - -### 1. Setup your development environment - -The most robust way of getting started with Solana development, is -[installing the Solana CLI](/docs/intro/installation.md) tools on your local -computer. This will allow you to have the most powerful development environment. - -Some developers may also opt for using -[Solana Playground](https://beta.solpg.io/), a browser based IDE. It will let -you write, build, and deploy onchain programs. All from your browser. No -installation needed. - -### 2. Write your program - -Writing Solana programs is most commonly done so using the Rust language. 
These -Rust programs are effectively the same as creating a traditional -[Rust library](https://doc.rust-lang.org/rust-by-example/crates/lib.html). - -> You can read more about other [supported languages](#support-languages) below. - -### 3. Compile the program - -Once the program is written, it must be complied down to -[Berkley Packet Filter](/docs/programs/faq.md#berkeley-packet-filter-bpf) -byte-code that will then be deployed to the blockchain. - -### 4. Generate the program's public address - -Using the [Solana CLI](/docs/intro/installation.md), the developer will generate -a new unique [Keypair](/docs/terminology.md#keypair) for the new program. The -public address (aka [Pubkey](/docs/terminology.md#public-key-pubkey)) from this -Keypair will be used on-chain as the program's public address (aka -[`programId`](/docs/terminology.md#program-id)). - -### 5. Deploying the program - -Then again using the CLI, the compiled program can be deployed to the selected -blockchain cluster by creating many transactions containing the program's -byte-code. Due to the transaction memory size limitations, each transaction -effectively sends small chunks of the program to the blockchain in a rapid-fire -manner. - -Once the entire program has been sent to the blockchain, a final transaction is -sent to write all of the buffered byte-code to the program's data account. This -either mark the new program as `executable`, or complete the process to upgrade -an existing program (if it already existed). - -## Support languages - -Solana programs are typically written in the -[Rust language](/docs/programs/lang-rust.md), but -[C/C++](/docs/programs/lang-c.md) are also supported. 
- -There are also various community driven efforts to enable writing on-chain -programs using other languages, including: - -- Python via [Seahorse](https://seahorse.dev/) (that acts as a wrapper the Rust - based Anchor framework) - -## Example programs - -You can also explore the [Program Examples](/docs/programs/examples.md) for -examples of onchain programs. - -## Limitations - -As you dive deeper into program development, it is important to understand some -of the important limitations associated with onchain programs. - -Read more details on the [Limitations](/docs/programs/limitations.md) page - -## Frequently asked questions - -Discover many of the [frequently asked questions](/docs/programs/faq.md) other -developers have about writing/understanding Solana programs. diff --git a/docs/programs/testing.md b/docs/programs/testing.md index 32d3bcdc3..2af73ebe5 100644 --- a/docs/programs/testing.md +++ b/docs/programs/testing.md @@ -1,6 +1,7 @@ --- title: "Testing with NodeJS" description: "Testing native solana programs written with rust using NodeJS" +sidebarSortOrder: 5 --- When developing programs on Solana, ensuring their correctness and reliability @@ -232,16 +233,3 @@ This is how the output looks like after running the tests for ℹ todo 0 ℹ duration_ms 63.52616 ``` - -## Next Steps - -- Checkout more testing examples from the - [Program Examples](/docs/programs/examples.md) -- You can also - use [anchor-bankrun](https://kevinheavey.github.io/solana-bankrun/tutorial/#anchor-integration) to - write tests in NodeJS for Anchor programs -- [Writing and testing your Solana programs using Rust](https://solana.com/docs/programs/lang-rust#how-to-test) - is possible with - [solana_program_test](https://docs.rs/solana-program-test/1.18.14/solana_program_test/) -- You can also write test with python for Solana programs written in Rust with - [solders.bankrun](https://kevinheavey.github.io/solders/api_reference/bankrun.html) diff --git 
a/docs/rpc/http/getTokenAccountsByOwner.mdx b/docs/rpc/http/getTokenAccountsByOwner.mdx index 84d5d1fd0..d60af8eb0 100644 --- a/docs/rpc/http/getTokenAccountsByOwner.mdx +++ b/docs/rpc/http/getTokenAccountsByOwner.mdx @@ -18,9 +18,9 @@ Returns all SPL Token accounts by token owner. Pubkey of account delegate to query, as base-58 encoded string - + -A JSON object with one of the following fields: +A JSON object with either one of the following fields: - `mint: ` - Pubkey of the specific token Mint to limit accounts to, as base-58 encoded string; or @@ -121,9 +121,9 @@ curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" - "id": 1, "method": "getTokenAccountsByOwner", "params": [ - "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F", + "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", { - "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E" + "programId": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA" }, { "encoding": "jsonParsed" @@ -137,52 +137,74 @@ curl https://api.devnet.solana.com -X POST -H "Content-Type: application/json" - ```json { + "id": 1, "jsonrpc": "2.0", "result": { "context": { - "slot": 1114 + "apiVersion": "2.0.8", + "slot": 329669901 }, "value": [ { "account": { "data": { - "program": "spl-token", "parsed": { - "accountType": "account", "info": { - "tokenAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, - "delegate": "4Nd1mBQtrMJVYVfKf2PJy9NZUZdTAsp7D4xWLs4gDB4T", - "delegatedAmount": { - "amount": "1", - "decimals": 1, - "uiAmount": 0.1, - "uiAmountString": "0.1" - }, + "isNative": false, + "mint": "BejB75Gmq8btLboHx7yffWcurHVBv5xvKcnY1fBYxnvf", + "owner": "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", "state": "initialized", + "tokenAmount": { + "amount": "10000000000000", + "decimals": 9, + "uiAmount": 10000, + "uiAmountString": "10000" + } + }, + "type": "account" + }, + "program": "spl-token", + "space": 165 + }, + "executable": false, + "lamports": 2039280, + "owner": 
"TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", + "rentEpoch": 18446744073709551615, + "space": 165 + }, + "pubkey": "5HvuXcy57o41qtGBBJM7dRN9DS6G3jd9KEhHt4eYqJmB" + }, + { + "account": { + "data": { + "parsed": { + "info": { "isNative": false, - "mint": "3wyAj7Rt1TWVPZVteFJPLa26JmLvdb1CAKEFZm3NY75E", - "owner": "4Qkev8aNZcqFNSRhQzwyLMFSsi94jHqE8WNVTJzTP99F" + "mint": "FSX34rYUJ4zfdD7z4p3L1Fd1pGiiErusaSNTfgKqhep6", + "owner": "A1TMhSGzQxMr1TboBKtgixKz1sS6REASMxPo1qsyTSJd", + "state": "initialized", + "tokenAmount": { + "amount": "10000000000000", + "decimals": 9, + "uiAmount": 10000, + "uiAmountString": "10000" + } }, "type": "account" }, + "program": "spl-token", "space": 165 }, "executable": false, - "lamports": 1726080, + "lamports": 2039280, "owner": "TokenkegQfeZyiNwAJbNbGKPFXCWuBvf9Ss623VQ5DA", - "rentEpoch": 4, + "rentEpoch": 18446744073709551615, "space": 165 }, - "pubkey": "C2gJg6tKpQs41PRS1nC8aw3ZKNZK3HQQZGVrDFDup5nx" + "pubkey": "HvTGvCP4tg2wVdFtqZCTdMPHDXmkYwNAxaTBCHabqh2X" } ] - }, - "id": 1 + } } ``` diff --git a/docs/terminology.md b/docs/terminology.md index 2ce27df49..5f94c07bd 100644 --- a/docs/terminology.md +++ b/docs/terminology.md @@ -82,7 +82,7 @@ a block chain. ## BPF loader The Solana program that owns and loads -[BPF](/docs/programs/faq.md#berkeley-packet-filter-bpf) +[BPF](/docs/core/programs#berkeley-packet-filter-bpf) [onchain programs](#onchain-program), allowing the program to interface with the runtime. 
diff --git a/package.json b/package.json index f09b9b1f3..91ef33165 100644 --- a/package.json +++ b/package.json @@ -10,10 +10,11 @@ "scripts": { "runner": "npx ts-node -r tsconfig-paths/register", "contentlayer:build": "npx contentlayer2 build --clearCache", - "dev": "yarn contentlayer:build && next dev -p 3001", - "build": "yarn prettier:i18n && yarn contentlayer:build && next build", + "code-import": "npx esrun coder.ts", + "dev": "yarn code-import && yarn contentlayer:build && concurrently -p \"[{name}]\" -n \"code import,next dev\" -c \"bgBlue.bold,bgGreen.bold\" \"yarn code-import --watch\" \"next dev -p 3001\"", + "build": "yarn code-import && yarn prettier:i18n && yarn contentlayer:build && next build", "start": "next start -p 3001", - "test": "yarn contentlayer:build", + "test": "yarn code-import && yarn prettier && yarn contentlayer:build", "lint": "next lint", "prettier:i18n": "prettier -cw \"i18n/**/*.{js,jsx,ts,tsx,md,css,md,mdx}\" --ignore-path \"[]\"", "prettier": "prettier -c \"./**/*.{js,jsx,ts,tsx,md,css,md,mdx}\"", @@ -21,7 +22,8 @@ "crowdin:download": "crowdin download && npm run prettier:i18n", "crowdin:upload": "crowdin upload sources", "browser-sync": "browser-sync start --proxy \"localhost:3000\" --files \"**/*.md\"", - "dev:sync": "yarn dev & (sleep 3 && yarn browser-sync)" + "prepare": "node -e \"if (process.env.NODE_ENV !== 'production'){process.exit(1)} \" || husky install", + "dev:sync": "yarn dev & (sleep 5 && yarn browser-sync)" }, "dependencies": { "@crowdin/cli": "^3.18.0", @@ -40,7 +42,25 @@ "typescript": "5.3.3" }, "devDependencies": { + "@types/mdast": "^4.0.4", + "chokidar": "^3.6.0", + "concurrently": "^8.2.2", "contentlayer2": "^0.4.6", - "prettier": "^3.2.4" + "husky": "^9.1.4", + "ignore": "^5.3.1", + "lint-staged": "^15.2.7", + "mdast": "^3.0.0", + "prettier": "^3.2.4", + "remark": "^15.0.1", + "remark-frontmatter": "^5.0.0", + "remark-parse": "^11.0.0", + "remark-stringify": "^11.0.0", + "strip-indent": "^4.0.0", + 
"unified": "^11.0.5", + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.2" + }, + "lint-staged": { + "*.{js,jsx,json,ts,tsx,md,css,md,mdx,yml,yaml}": "yarn prettier:fix" } } diff --git a/public/assets/courses/unboxed/basic-solana-mobile-connect.png b/public/assets/courses/unboxed/basic-solana-mobile-connect.png index e54494c06..6f9c61911 100644 Binary files a/public/assets/courses/unboxed/basic-solana-mobile-connect.png and b/public/assets/courses/unboxed/basic-solana-mobile-connect.png differ diff --git a/public/assets/courses/unboxed/basic-solana-mobile-flow.png b/public/assets/courses/unboxed/basic-solana-mobile-flow.png index a030480d6..950cd2c5c 100644 Binary files a/public/assets/courses/unboxed/basic-solana-mobile-flow.png and b/public/assets/courses/unboxed/basic-solana-mobile-flow.png differ diff --git a/public/assets/courses/unboxed/basic-solana-mobile-transact.png b/public/assets/courses/unboxed/basic-solana-mobile-transact.png index ae03da06d..646058a84 100644 Binary files a/public/assets/courses/unboxed/basic-solana-mobile-transact.png and b/public/assets/courses/unboxed/basic-solana-mobile-transact.png differ diff --git a/public/assets/courses/unboxed/burn-tokens.png b/public/assets/courses/unboxed/burn-tokens.png new file mode 100644 index 000000000..1547f718a Binary files /dev/null and b/public/assets/courses/unboxed/burn-tokens.png differ diff --git a/public/assets/courses/unboxed/delegate-token.png b/public/assets/courses/unboxed/delegate-token.png new file mode 100644 index 000000000..b5e954f69 Binary files /dev/null and b/public/assets/courses/unboxed/delegate-token.png differ diff --git a/public/assets/courses/unboxed/revoke-approve-tokens.png b/public/assets/courses/unboxed/revoke-approve-tokens.png new file mode 100644 index 000000000..0cf4f8cf1 Binary files /dev/null and b/public/assets/courses/unboxed/revoke-approve-tokens.png differ diff --git a/public/assets/courses/unboxed/solana-explorer-create-tree.png 
b/public/assets/courses/unboxed/solana-explorer-create-tree.png new file mode 100644 index 000000000..cfe10dff5 Binary files /dev/null and b/public/assets/courses/unboxed/solana-explorer-create-tree.png differ diff --git a/public/assets/courses/unboxed/solana-explorer-showing-cnft-transfer-logs.png b/public/assets/courses/unboxed/solana-explorer-showing-cnft-transfer-logs.png new file mode 100644 index 000000000..2cf699e0b Binary files /dev/null and b/public/assets/courses/unboxed/solana-explorer-showing-cnft-transfer-logs.png differ diff --git a/src/utils/code-import.ts b/src/utils/code-import.ts new file mode 100644 index 000000000..b31e0277a --- /dev/null +++ b/src/utils/code-import.ts @@ -0,0 +1,169 @@ +// remark-code-import +// code-import.ts +// https://github.com/kevin940726/remark-code-import +import { readFile, stat } from "node:fs/promises"; +import path from "node:path"; +import { EOL } from "node:os"; +import { visit } from "unist-util-visit"; +import stripIndent from "strip-indent"; +import type { Root, Code, Parent } from "mdast"; +import type { VFile } from "vfile"; + +interface CodeImportOptions { + async?: boolean; + preserveTrailingNewline?: boolean; + removeRedundantIndentations?: boolean; + rootDir?: string; + allowImportingFromOutside?: boolean; +} + +interface LineRange { + from: number; + to: number; +} + +function parseLineRanges(rangeString: string): LineRange[] { + const rangeRegex = /#L(\d+)(?:-L?(\d+))?/g; + const ranges: LineRange[] = []; + let match; + + while ((match = rangeRegex.exec(rangeString)) !== null) { + const [, from, to] = match; + const fromLine = parseInt(from, 10); + const toLine = to ? parseInt(to, 10) : fromLine; + + if (fromLine === 0 || toLine === 0) { + throw new Error( + `Invalid line number: Line numbers must be positive integers`, + ); + } + + if (fromLine > toLine) { + throw new Error( + `Invalid range: L${fromLine}-L${toLine}. 
'from' should be less than or equal to 'to'`, + ); + } + + ranges.push({ from: fromLine, to: toLine }); + } + + // Sort ranges and check for overlaps + ranges.sort((a, b) => a.from - b.from); + for (let i = 1; i < ranges.length; i++) { + if (ranges[i].from <= ranges[i - 1].to) { + throw new Error(`Overlapping or out-of-order ranges are not allowed`); + } + } + + return ranges; +} + +function extractLines( + content: string, + ranges: LineRange[], + preserveTrailingNewline = false, +): string { + const lines = content.split(EOL); + let result: string[] = []; + + for (const range of ranges) { + if (range.to > lines.length) { + throw new Error( + `Line range exceeds file length of ${lines.length} lines`, + ); + } + result = result.concat(lines.slice(range.from - 1, range.to)); + } + + let finalResult = result.join("\n"); + if ( + preserveTrailingNewline && + content.endsWith("\n") && + !finalResult.endsWith("\n") + ) { + finalResult += "\n"; + } + + return finalResult; +} + +function importCode(options: CodeImportOptions = {}) { + const rootDir = options.rootDir || process.cwd(); + + if (!path.isAbsolute(rootDir)) { + throw new Error(`"rootDir" has to be an absolute path`); + } + + return async function transform(tree: Root, file: VFile) { + const codes: [Code, number | null, Parent][] = []; + + visit(tree, "code", (node, index, parent) => { + codes.push([node as Code, index as null | number, parent as Parent]); + }); + + for (const [node] of codes) { + const fileMeta = (node.meta || "") + .split(/(?<!\\) /g) + .find(meta => meta.startsWith("file=")); + + if (!fileMeta) { + continue; + } + + const res = /^file=(["'])?(\/.+?)\1?(#.+)?$/.exec(fileMeta); + + if (!res) { + throw new Error( + `Unable to parse file path ${fileMeta}. 
File path must start with a forward slash (/)`, + ); + } + + const [, , filePath, rangeString = ""] = res; + + // Resolve the path relative to rootDir + const normalizedFilePath = path.join(rootDir, filePath.slice(1)); + const fileAbsPath = path.resolve(normalizedFilePath); + + try { + // Check if the path is a directory + const stats = await stat(fileAbsPath); + if (stats.isDirectory()) { + throw new Error(`Path is a directory, not a file`); + } + + if (!options.allowImportingFromOutside) { + const relativePathFromRootDir = path.relative(rootDir, fileAbsPath); + if ( + relativePathFromRootDir.startsWith(`..${path.sep}`) || + path.isAbsolute(relativePathFromRootDir) + ) { + throw new Error( + `Attempted to import code from "${fileAbsPath}", which is outside from the rootDir "${rootDir}"`, + ); + } + } + + const ranges = rangeString + ? parseLineRanges(rangeString) + : [{ from: 1, to: Infinity }]; + + const fileContent = await readFile(fileAbsPath, "utf8"); + node.value = extractLines( + fileContent, + ranges, + options.preserveTrailingNewline, + ); + if (options.removeRedundantIndentations) { + node.value = stripIndent(node.value); + } + } catch (error) { + throw new Error( + `Error processing ${fileAbsPath}: ${(error as Error).message}`, + ); + } + } + }; +} + +export { importCode }; +export default importCode; diff --git a/tsconfig.json b/tsconfig.json index 26231d6e6..d90767bb3 100644 --- a/tsconfig.json +++ b/tsconfig.json @@ -1,6 +1,6 @@ { "compilerOptions": { - "target": "es5", + "target": "ESNext", "lib": ["dom", "dom.iterable", "esnext"], "skipLibCheck": true, "allowJs": true, @@ -18,20 +18,20 @@ "paths": { "contentlayer/generated": ["./.contentlayer/generated"], "@/*": ["./src/*"], - "@@/*": ["./*"] + "@@/*": ["./*"], }, "plugins": [ { - "name": "next" - } - ] + "name": "next", + }, + ], }, "include": [ ".contentlayer/generated", "next-env.d.ts", "**/*.ts", "**/*.tsx", - ".next/types/**/*.ts" + ".next/types/**/*.ts", ], - "exclude": ["node_modules"] + 
"exclude": ["node_modules", "code"], } diff --git a/yarn.lock b/yarn.lock index 31f18b656..9336a24b3 100644 --- a/yarn.lock +++ b/yarn.lock @@ -4642,4 +4642,4 @@ zod@^3.22.4: zwitch@^2.0.0, zwitch@^2.0.4: version "2.0.4" resolved "https://registry.npmjs.org/zwitch/-/zwitch-2.0.4.tgz" - integrity sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A== + integrity sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A== \ No newline at end of file